diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vleff.c
@@ -1,12 +1,11 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s \
-// RUN:   -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -20,7 +19,6 @@
   return vle8ff_v_i8mf8(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -34,7 +32,6 @@
   return vle8ff_v_i8mf4(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -48,7 +45,6 @@
   return vle8ff_v_i8mf2(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -62,7 +58,6 @@
   return vle8ff_v_i8m1(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -76,7 +71,6 @@
   return vle8ff_v_i8m2(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -90,7 +84,6 @@
   return vle8ff_v_i8m4(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -104,7 +97,6 @@
   return vle8ff_v_i8m8(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -118,7 +110,6 @@
   return vle8ff_v_u8mf8(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -132,7 +123,6 @@
   return vle8ff_v_u8mf4(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -146,7 +136,6 @@
   return vle8ff_v_u8mf2(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -160,7 +149,6 @@
   return vle8ff_v_u8m1(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -174,7 +162,6 @@
   return vle8ff_v_u8m2(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -188,7 +175,6 @@
   return vle8ff_v_u8m4(base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -202,7 +188,6 @@
   return vle8ff_v_u8m8(base,
new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -216,7 +201,6 @@ return vle8ff_v_i8mf8_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -230,7 +214,6 @@ return vle8ff_v_i8mf4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -244,7 +227,6 @@ return vle8ff_v_i8mf2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -258,7 +240,6 @@ return vle8ff_v_i8m1_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -272,7 +253,6 @@ return vle8ff_v_i8m2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -286,7 +266,6 @@ return vle8ff_v_i8m4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -300,7 +279,6 @@ return vle8ff_v_i8m8_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -314,7 +292,6 @@ return vle8ff_v_u8mf8_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -328,7 +305,6 @@ return vle8ff_v_u8mf4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -342,7 +318,6 @@ return vle8ff_v_u8mf2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -356,7 +331,6 @@ return vle8ff_v_u8m1_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -370,7 +344,6 @@ return vle8ff_v_u8m2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -384,7 +357,6 @@ return vle8ff_v_u8m4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle8ff_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -398,7 +370,6 @@ return vle8ff_v_u8m8_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -412,7 +383,6 @@ return vle16ff_v_i16mf4(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -426,7 +396,6 @@ return vle16ff_v_i16mf2(base, new_vl, vl); } -// // 
CHECK-RV64-LABEL: @test_vle16ff_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -440,7 +409,6 @@ return vle16ff_v_i16m1(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -454,7 +422,6 @@ return vle16ff_v_i16m2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -468,7 +435,6 @@ return vle16ff_v_i16m4(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -482,7 +448,6 @@ return vle16ff_v_i16m8(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -496,7 +461,6 @@ return vle16ff_v_u16mf4(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -510,7 +474,6 @@ return vle16ff_v_u16mf2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -524,7 +487,6 @@ return vle16ff_v_u16m1(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -538,7 +500,6 @@ return vle16ff_v_u16m2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -552,7 +513,6 @@ return vle16ff_v_u16m4(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -566,7 +526,6 @@ return vle16ff_v_u16m8(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -580,7 +539,6 @@ return vle16ff_v_i16mf4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -594,7 +552,6 @@ return vle16ff_v_i16mf2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -608,7 +565,6 @@ return vle16ff_v_i16m1_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -622,7 +578,6 @@ return vle16ff_v_i16m2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -636,7 +591,6 @@ return vle16ff_v_i16m4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -650,7 +604,6 @@ return vle16ff_v_i16m8_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ 
-664,7 +617,6 @@ return vle16ff_v_u16mf4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -678,7 +630,6 @@ return vle16ff_v_u16mf2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -692,7 +643,6 @@ return vle16ff_v_u16m1_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -706,7 +656,6 @@ return vle16ff_v_u16m2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -720,7 +669,6 @@ return vle16ff_v_u16m4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle16ff_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -734,7 +682,6 @@ return vle16ff_v_u16m8_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -748,7 +695,6 @@ return vle32ff_v_i32mf2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -762,7 +708,6 @@ return vle32ff_v_i32m1(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -776,7 +721,6 @@ return vle32ff_v_i32m2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -790,7 +734,6 @@ return vle32ff_v_i32m4(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -804,7 +747,6 @@ return vle32ff_v_i32m8(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -818,7 +760,6 @@ return vle32ff_v_u32mf2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -832,7 +773,6 @@ return vle32ff_v_u32m1(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -846,7 +786,6 @@ return vle32ff_v_u32m2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -860,7 +799,6 @@ return vle32ff_v_u32m4(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -874,7 +812,6 @@ return vle32ff_v_u32m8(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -888,7 +825,6 @@ return vle32ff_v_f32mf2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -902,7 +838,6 @@ return vle32ff_v_f32m1(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -916,7 +851,6 @@ return vle32ff_v_f32m2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -930,7 +864,6 @@ return vle32ff_v_f32m4(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -944,7 +877,6 @@ return vle32ff_v_f32m8(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -958,7 +890,6 @@ return vle32ff_v_i32mf2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -972,7 +903,6 @@ return vle32ff_v_i32m1_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -986,7 +916,6 @@ return vle32ff_v_i32m2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1000,7 +929,6 @@ return vle32ff_v_i32m4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1014,7 +942,6 @@ return vle32ff_v_i32m8_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1028,7 +955,6 @@ return vle32ff_v_u32mf2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1042,7 +968,6 @@ return vle32ff_v_u32m1_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1056,7 +981,6 @@ return vle32ff_v_u32m2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1070,7 +994,6 @@ return vle32ff_v_u32m4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1084,7 +1007,6 @@ return vle32ff_v_u32m8_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1098,7 +1020,6 @@ return vle32ff_v_f32mf2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1112,7 +1033,6 @@ return vle32ff_v_f32m1_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_f32m2_m( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1126,7 +1046,6 @@ return vle32ff_v_f32m2_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1140,7 +1059,6 @@ return vle32ff_v_f32m4_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle32ff_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1154,7 +1072,6 @@ return vle32ff_v_f32m8_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1168,7 +1085,6 @@ return vle64ff_v_i64m1(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1182,7 +1098,6 @@ return vle64ff_v_i64m2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1196,7 +1111,6 @@ return vle64ff_v_i64m4(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1210,7 +1124,6 @@ return vle64ff_v_i64m8(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1224,7 +1137,6 @@ return vle64ff_v_u64m1(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1238,7 +1150,6 @@ return vle64ff_v_u64m2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1252,7 +1163,6 @@ return vle64ff_v_u64m4(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1266,7 +1176,6 @@ return vle64ff_v_u64m8(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1280,7 +1189,6 @@ return vle64ff_v_f64m1(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1294,7 +1202,6 @@ return vle64ff_v_f64m2(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1308,7 +1215,6 @@ return vle64ff_v_f64m4(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1322,7 +1228,6 @@ return vle64ff_v_f64m8(base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1336,7 +1241,6 @@ return vle64ff_v_i64m1_m(mask, maskedoff, base, new_vl, vl); } -// // CHECK-RV64-LABEL: @test_vle64ff_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1350,7 +1254,6 @@ return vle64ff_v_i64m2_m(mask, maskedoff, 
base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1364,7 +1267,6 @@
   return vle64ff_v_i64m4_m(mask, maskedoff, base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1378,7 +1280,6 @@
   return vle64ff_v_i64m8_m(mask, maskedoff, base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -1392,7 +1293,6 @@
   return vle64ff_v_u64m1_m(mask, maskedoff, base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -1406,7 +1306,6 @@
   return vle64ff_v_u64m2_m(mask, maskedoff, base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -1420,7 +1319,6 @@
   return vle64ff_v_u64m4_m(mask, maskedoff, base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -1434,7 +1332,6 @@
   return vle64ff_v_u64m8_m(mask, maskedoff, base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -1448,7 +1345,6 @@
   return vle64ff_v_f64m1_m(mask, maskedoff, base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -1462,7 +1358,6 @@
   return vle64ff_v_f64m2_m(mask, maskedoff, base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -1476,7 +1371,6 @@
   return vle64ff_v_f64m4_m(mask, maskedoff, base, new_vl, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vle64ff_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -1490,3 +1384,158 @@
   return vle64ff_v_f64m8_m(mask, maskedoff, base, new_vl, vl);
 }

+// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.nxv1f16.i64(<vscale x 1 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP2]]
+//
+vfloat16mf4_t test_vle16ff_v_f16mf4 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16mf4 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.nxv2f16.i64(<vscale x 2 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP2]]
+//
+vfloat16mf2_t test_vle16ff_v_f16mf2 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16mf2 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.nxv4f16.i64(<vscale x 4 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP2]]
+//
+vfloat16m1_t test_vle16ff_v_f16m1 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m1 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.nxv8f16.i64(<vscale x 8 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP2]]
+//
+vfloat16m2_t test_vle16ff_v_f16m2 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m2 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.nxv16f16.i64(<vscale x 16 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP2]]
+//
+vfloat16m4_t test_vle16ff_v_f16m4 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m4 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.nxv32f16.i64(<vscale x 32 x half>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP2]]
+//
+vfloat16m8_t test_vle16ff_v_f16m8 (const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m8 (base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 1 x half>, i64 } @llvm.riscv.vleff.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP2]]
+//
+vfloat16mf4_t test_vle16ff_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16mf4_m (mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 2 x half>, i64 } @llvm.riscv.vleff.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 2 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP2]]
+//
+vfloat16mf2_t test_vle16ff_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16mf2_m (mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 4 x half>, i64 } @llvm.riscv.vleff.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 4 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP2]]
+//
+vfloat16m1_t test_vle16ff_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m1_m (mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 8 x half>, i64 } @llvm.riscv.vleff.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 8 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP2]]
+//
+vfloat16m2_t test_vle16ff_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m2_m (mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 16 x half>, i64 } @llvm.riscv.vleff.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 16 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP2]]
+//
+vfloat16m4_t test_vle16ff_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m4_m (mask, maskedoff, base, new_vl, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vle16ff_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call { <vscale x 32 x half>, i64 } @llvm.riscv.vleff.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 0
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 32 x half>, i64 } [[TMP1]], 1
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 8
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP2]]
+//
+vfloat16m8_t test_vle16ff_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, size_t *new_vl, size_t vl) {
+  return vle16ff_v_f16m8_m (mask, maskedoff, base, new_vl, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxei.c
@@ -1,4208 +1,4247 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+vint8mf8_t test_vloxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vloxei8_v_i8mf8(base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+vint8mf4_t test_vloxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vloxei8_v_i8mf4(base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+vint8mf2_t test_vloxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vloxei8_v_i8mf2(base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
+vint8m1_t test_vloxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
   return vloxei8_v_i8m1(base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
+vint8m2_t test_vloxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
   return vloxei8_v_i8m2(base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8m4(
+// CHECK-RV64-LABEL:
@test_vloxei8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t testuxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { +vint8m4_t test_vloxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_i8m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i8m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m8_t testuxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { +vint8m8_t test_vloxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { return vloxei8_v_i8m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8mf8( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t testuxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { +vint8mf8_t test_vloxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8mf4( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t testuxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { +vint8mf4_t test_vloxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8mf2( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t testuxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { +vint8mf2_t test_vloxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t testuxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { +vint8m1_t test_vloxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t testuxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { +vint8m2_t test_vloxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_i8m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t testuxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { +vint8m4_t test_vloxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_i8m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t testuxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { +vint8mf8_t test_vloxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8mf4( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t testuxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { +vint8mf4_t test_vloxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8mf2( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t testuxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { +vint8mf2_t test_vloxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_i8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t testuxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { +vint8m1_t test_vloxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_i8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t testuxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { +vint8m2_t test_vloxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { 
return vloxei32_v_i8m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t testuxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { +vint8mf8_t test_vloxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t testuxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { +vint8mf4_t test_vloxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t testuxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { +vint8mf2_t test_vloxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8m1( +// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t testuxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { +vint8m1_t test_vloxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16mf4( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { +vint16mf4_t test_vloxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_i16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16mf2( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { +vint16mf2_t test_vloxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_i16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m1( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { +vint16m1_t test_vloxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_i16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m2( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { +vint16m2_t test_vloxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_i16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m4( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t testuxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { +vint16m4_t test_vloxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_i16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t testuxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { +vint16m8_t test_vloxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_i16m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16mf4( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { +vint16mf4_t test_vloxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16mf2( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { +vint16mf2_t test_vloxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP1]] // -vint16m1_t testuxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { +vint16m1_t test_vloxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { +vint16m2_t test_vloxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t testuxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { +vint16m4_t test_vloxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_i16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m8( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t testuxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { +vint16m8_t test_vloxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_i16m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16mf4( +// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { +vint16mf4_t test_vloxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16mf2( +// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { +vint16mf2_t test_vloxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { +vint16m1_t test_vloxei32_v_i16m1(const int16_t *base, 
vuint32m2_t bindex, size_t vl) { return vloxei32_v_i16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { +vint16m2_t test_vloxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_i16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16m4( +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t testuxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { +vint16m4_t test_vloxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_i16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16mf4( +// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { +vint16mf4_t test_vloxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { +vint16mf2_t test_vloxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { +vint16m1_t test_vloxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16m2( +// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { +vint16m2_t test_vloxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32mf2( +// CHECK-RV64-LABEL: 
@test_vloxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { +vint32mf2_t test_vloxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_i32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m1( +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { +vint32m1_t test_vloxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_i32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m2( +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { +vint32m2_t test_vloxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_i32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m4( +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { +vint32m4_t test_vloxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_i32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t testuxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { +vint32m8_t test_vloxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_i32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32mf2( +// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { +vint32mf2_t test_vloxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { +vint32m1_t test_vloxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { +vint32m2_t test_vloxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { +vint32m4_t test_vloxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m8( +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t testuxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { +vint32m8_t test_vloxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_i32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32mf2( +// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { +vint32mf2_t test_vloxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { +vint32m1_t test_vloxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei32_v_i32m2(const int32_t 
*base, vuint32m2_t bindex, size_t vl) { +vint32m2_t test_vloxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_i32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m4( +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { +vint32m4_t test_vloxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_i32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m8( +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t testuxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { +vint32m8_t test_vloxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_i32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32mf2( +// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { +vint32mf2_t test_vloxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32m1( +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { +vint32m1_t test_vloxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32m2( +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { +vint32m2_t test_vloxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32m4( +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { +vint32m4_t test_vloxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i32m4(base, bindex, 
vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { +vint64m1_t test_vloxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_i64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m2( +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { +vint64m2_t test_vloxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_i64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m4( +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { +vint64m4_t test_vloxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_i64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { +vint64m8_t test_vloxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_i64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { +vint64m1_t test_vloxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { +vint64m2_t test_vloxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { +vint64m4_t test_vloxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m8( +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { +vint64m8_t test_vloxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i64m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { +vint64m1_t test_vloxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i64m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { +vint64m2_t test_vloxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i64m4( +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { +vint64m4_t test_vloxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_i64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i64m8( +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { +vint64m8_t test_vloxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_i64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m1( +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t 
testuxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { +vint64m1_t test_vloxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m2( +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { +vint64m2_t test_vloxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m4( +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { +vint64m4_t test_vloxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m8( +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { +vint64m8_t test_vloxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8mf8( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +vuint8mf8_t test_vloxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8mf4( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +vuint8mf4_t test_vloxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8mf2( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +vuint8mf2_t test_vloxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u8mf2(base, 
bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m1( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { +vuint8m1_t test_vloxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m2( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { +vuint8m2_t test_vloxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_u8m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m4( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t testuxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { +vuint8m4_t test_vloxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_u8m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t testuxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { +vuint8m8_t test_vloxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { return vloxei8_v_u8m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf8( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +vuint8mf8_t test_vloxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf4( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +vuint8mf4_t test_vloxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf2( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { +vuint8mf2_t test_vloxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { +vuint8m1_t test_vloxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { +vuint8m2_t test_vloxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_u8m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t testuxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { +vuint8m4_t test_vloxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_u8m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf8( +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +vuint8mf8_t test_vloxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { +vuint8mf4_t test_vloxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei32_v_u8mf2(const uint8_t *base, 
vuint32m2_t bindex, size_t vl) { +vuint8mf2_t test_vloxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { +vuint8m1_t test_vloxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { +vuint8m2_t test_vloxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_u8m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf8( +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { +vuint8mf8_t test_vloxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf4( +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { +vuint8mf4_t test_vloxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { +vuint8mf2_t test_vloxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8m1( +// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { +vuint8m1_t test_vloxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: 
@testuxei8_v_u16mf4( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +vuint16mf4_t test_vloxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16mf2( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +vuint16mf2_t test_vloxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m1( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +vuint16m1_t test_vloxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m2( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { +vuint16m2_t test_vloxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m4( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { +vuint16m4_t test_vloxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_u16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t testuxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { +vuint16m8_t test_vloxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_u16m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16mf4( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +vuint16mf4_t test_vloxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16mf2( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +vuint16mf2_t test_vloxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { +vuint16m1_t test_vloxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { +vuint16m2_t test_vloxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { +vuint16m4_t test_vloxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_u16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m8( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t testuxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { +vuint16m8_t test_vloxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_u16m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16mf4( +// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +vuint16mf4_t test_vloxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16mf2( +// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { +vuint16mf2_t test_vloxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { +vuint16m1_t test_vloxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { +vuint16m2_t test_vloxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m4( +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { +vuint16m4_t test_vloxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_u16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { +vuint16mf4_t test_vloxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { 
+vuint16mf2_t test_vloxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { +vuint16m1_t test_vloxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16m2( +// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { +vuint16m2_t test_vloxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +vuint32mf2_t test_vloxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m1( +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +vuint32m1_t test_vloxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m2( +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +vuint32m2_t test_vloxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m4( +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { +vuint32m4_t test_vloxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: 
@testuxei8_v_u32m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { +vuint32m8_t test_vloxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_u32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32mf2( +// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +vuint32mf2_t test_vloxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +vuint32m1_t test_vloxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { +vuint32m2_t test_vloxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { +vuint32m4_t test_vloxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m8( +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { +vuint32m8_t test_vloxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_u32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32mf2( +// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* 
[[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +vuint32mf2_t test_vloxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { +vuint32m1_t test_vloxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { +vuint32m2_t test_vloxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m4( +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { +vuint32m4_t test_vloxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m8( +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { +vuint32m8_t test_vloxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_u32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32mf2( +// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { +vuint32mf2_t test_vloxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m1( +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { +vuint32m1_t test_vloxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m2( +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { +vuint32m2_t test_vloxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m4( +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { +vuint32m4_t test_vloxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m1( +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +vuint64m1_t test_vloxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m2( +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +vuint64m2_t test_vloxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m4( +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { +vuint64m4_t test_vloxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { +vuint64m8_t test_vloxei8_v_u64m8(const 
uint64_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +vuint64m1_t test_vloxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +vuint64m2_t test_vloxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { +vuint64m4_t test_vloxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m8( +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { +vuint64m8_t test_vloxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +vuint64m1_t test_vloxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { +vuint64m2_t test_vloxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m4( +// 
CHECK-RV64-LABEL: @test_vloxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { +vuint64m4_t test_vloxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m8( +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { +vuint64m8_t test_vloxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { +vuint64m1_t test_vloxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m2( +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { +vuint64m2_t test_vloxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m4( +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { +vuint64m4_t test_vloxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m8( +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { +vuint64m8_t test_vloxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t testuxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { +vfloat32mf2_t test_vloxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_f32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t testuxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { +vfloat32m1_t test_vloxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_f32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t testuxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) { +vfloat32m2_t test_vloxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_f32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32m4( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t testuxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { +vfloat32m4_t test_vloxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_f32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t testuxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) { +vfloat32m8_t test_vloxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_f32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t testuxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) { +vfloat32mf2_t test_vloxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_f32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t 
testuxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { +vfloat32m1_t test_vloxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_f32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t testuxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { +vfloat32m2_t test_vloxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_f32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f32m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t testuxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) { +vfloat32m4_t test_vloxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_f32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f32m8( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t testuxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { +vfloat32m8_t test_vloxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_f32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t testuxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { +vfloat32mf2_t test_vloxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_f32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t testuxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { +vfloat32m1_t test_vloxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_f32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t testuxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) { +vfloat32m2_t test_vloxei32_v_f32m2(const float *base, vuint32m2_t bindex, 
size_t vl) { return vloxei32_v_f32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f32m4( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t testuxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { +vfloat32m4_t test_vloxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_f32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f32m8( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t testuxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) { +vfloat32m8_t test_vloxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_f32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f32mf2( +// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t testuxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { +vfloat32mf2_t test_vloxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_f32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f32m1( +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t testuxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { +vfloat32m1_t test_vloxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_f32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f32m2( +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t testuxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { +vfloat32m2_t test_vloxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_f32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f32m4( +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t testuxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { +vfloat32m4_t test_vloxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_f32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t testuxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { +vfloat64m1_t test_vloxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_f64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t testuxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { +vfloat64m2_t test_vloxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_f64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f64m4( +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t testuxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { +vfloat64m4_t test_vloxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_f64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f64m8( +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t testuxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) { +vfloat64m8_t test_vloxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_f64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t testuxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { +vfloat64m1_t test_vloxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_f64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t testuxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { +vfloat64m2_t test_vloxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_f64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f64m4( +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.nxv4f64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t testuxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { +vfloat64m4_t test_vloxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_f64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f64m8( +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t testuxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { +vfloat64m8_t test_vloxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_f64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t testuxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { +vfloat64m1_t test_vloxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_f64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t testuxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { +vfloat64m2_t test_vloxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_f64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f64m4( +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t testuxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { +vfloat64m4_t test_vloxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_f64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f64m8( +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t testuxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { +vfloat64m8_t test_vloxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_f64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f64m1( +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t 
testuxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) { +vfloat64m1_t test_vloxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_f64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f64m2( +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t testuxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { +vfloat64m2_t test_vloxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_f64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f64m4( +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t testuxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { +vfloat64m4_t test_vloxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_f64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f64m8( +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t testuxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { +vfloat64m8_t test_vloxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_f64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t testuxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { +vint8mf8_t test_vloxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t testuxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { +vint8mf4_t test_vloxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t testuxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { +vint8mf2_t test_vloxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t testuxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { +vint8m1_t test_vloxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t testuxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { +vint8m2_t test_vloxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t testuxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { +vint8m4_t test_vloxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i8m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m8_t testuxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { +vint8m8_t test_vloxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) { return vloxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t testuxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { +vint8mf8_t test_vloxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t testuxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { +vint8mf4_t test_vloxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t testuxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { +vint8mf2_t test_vloxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t testuxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { +vint8m1_t test_vloxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t testuxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { +vint8m2_t test_vloxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t 
testuxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { +vint8m4_t test_vloxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t testuxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { +vint8mf8_t test_vloxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t testuxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { +vint8mf4_t test_vloxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t testuxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { +vint8mf2_t test_vloxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t testuxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { +vint8m1_t test_vloxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t testuxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const 
int8_t *base, vuint32m8_t bindex, size_t vl) { +vint8m2_t test_vloxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8mf8_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t testuxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { +vint8mf8_t test_vloxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8mf4_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t testuxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { +vint8mf4_t test_vloxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8mf2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t testuxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { +vint8mf2_t test_vloxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t testuxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { +vint8m1_t test_vloxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { +vint16mf4_t 
test_vloxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { +vint16mf2_t test_vloxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { +vint16m1_t test_vloxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { +vint16m2_t test_vloxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t testuxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { +vint16m4_t test_vloxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t testuxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { +vint16m8_t test_vloxei8_v_i16m8_m(vbool2_t mask, vint16m8_t 
maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { +vint16mf4_t test_vloxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { +vint16mf2_t test_vloxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { +vint16m1_t test_vloxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { +vint16m2_t test_vloxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t testuxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) { +vint16m4_t test_vloxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, 
const int16_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t testuxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { +vint16m8_t test_vloxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { +vint16mf4_t test_vloxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { +vint16mf2_t test_vloxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { +vint16m1_t test_vloxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) { +vint16m2_t test_vloxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const 
int16_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16m4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t testuxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { +vint16m4_t test_vloxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { +vint16mf4_t test_vloxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { +vint16mf2_t test_vloxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { +vint16m1_t test_vloxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16m2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) { +vint16m2_t test_vloxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t 
*base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { +vint32mf2_t test_vloxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { +vint32m1_t test_vloxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { +vint32m2_t test_vloxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { +vint32m4_t test_vloxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t testuxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { +vint32m8_t test_vloxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) { return 
vloxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { +vint32mf2_t test_vloxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { +vint32m1_t test_vloxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { +vint32m2_t test_vloxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { +vint32m4_t test_vloxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t testuxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { +vint32m8_t test_vloxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) { return 
vloxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { +vint32mf2_t test_vloxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { +vint32m1_t test_vloxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { +vint32m2_t test_vloxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { +vint32m4_t test_vloxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m8_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t testuxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { +vint32m8_t test_vloxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) { return 
vloxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { +vint32mf2_t test_vloxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { +vint32m1_t test_vloxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32m2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { +vint32m2_t test_vloxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32m4_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { +vint32m4_t test_vloxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { +vint64m1_t test_vloxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_i64m1_m(mask, 
maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { +vint64m2_t test_vloxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { +vint64m4_t test_vloxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { +vint64m8_t test_vloxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { +vint64m1_t test_vloxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { +vint64m2_t test_vloxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl); } -// -// 
CHECK-RV64-LABEL: @testuxei16_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { +vint64m4_t test_vloxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { +vint64m8_t test_vloxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { +vint64m1_t test_vloxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { +vint64m2_t test_vloxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { +vint64m4_t test_vloxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: 
@testuxei32_v_i64m8_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { +vint64m8_t test_vloxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { +vint64m1_t test_vloxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { +vint64m2_t test_vloxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m4_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { +vint64m4_t test_vloxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m8_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { +vint64m8_t test_vloxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8mf8_m( +// 
CHECK-RV64-LABEL: @test_vloxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +vuint8mf8_t test_vloxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +vuint8mf4_t test_vloxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +vuint8mf2_t test_vloxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +vuint8m1_t test_vloxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { +vuint8m2_t test_vloxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t testuxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { +vuint8m4_t test_vloxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t testuxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { +vuint8m8_t test_vloxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { return vloxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +vuint8mf8_t test_vloxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +vuint8mf4_t test_vloxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +vuint8mf2_t test_vloxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* 
[[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +vuint8m1_t test_vloxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { +vuint8m2_t test_vloxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t testuxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { +vuint8m4_t test_vloxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +vuint8mf8_t test_vloxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +vuint8mf4_t test_vloxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +vuint8mf2_t test_vloxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +vuint8m1_t test_vloxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { +vuint8m2_t test_vloxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +vuint8mf8_t test_vloxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +vuint8mf4_t test_vloxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +vuint8mf2_t test_vloxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +vuint8m1_t test_vloxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +vuint16mf4_t test_vloxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +vuint16mf2_t test_vloxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +vuint16m1_t test_vloxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { +vuint16m2_t test_vloxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { +vuint16m4_t test_vloxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t testuxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { +vuint16m8_t test_vloxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { return vloxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +vuint16mf4_t test_vloxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +vuint16mf2_t test_vloxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vloxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +vuint16m1_t test_vloxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { +vuint16m2_t test_vloxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { +vuint16m4_t test_vloxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t testuxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { +vuint16m8_t test_vloxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { return vloxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +vuint16mf4_t test_vloxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +vuint16mf2_t test_vloxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +vuint16m1_t test_vloxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { +vuint16m2_t test_vloxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { +vuint16m4_t test_vloxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +vuint16mf4_t test_vloxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +vuint16mf2_t test_vloxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { +vuint16m1_t test_vloxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { +vuint16m2_t test_vloxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +vuint32mf2_t test_vloxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +vuint32m1_t test_vloxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +vuint32m2_t test_vloxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { +vuint32m4_t test_vloxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { +vuint32m8_t test_vloxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +vuint32mf2_t test_vloxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +vuint32m1_t test_vloxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { +vuint32m2_t test_vloxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { +vuint32m4_t test_vloxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { +vuint32m8_t test_vloxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +vuint32mf2_t test_vloxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +vuint32m1_t test_vloxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* 
[[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { +vuint32m2_t test_vloxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { +vuint32m4_t test_vloxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { +vuint32m8_t test_vloxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +vuint32mf2_t test_vloxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +vuint32m1_t test_vloxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { +vuint32m2_t test_vloxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { +vuint32m4_t test_vloxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +vuint64m1_t test_vloxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +vuint64m2_t test_vloxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { +vuint64m4_t test_vloxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { +vuint64m8_t test_vloxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +vuint64m1_t test_vloxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +vuint64m2_t test_vloxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { +vuint64m4_t test_vloxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { +vuint64m8_t test_vloxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +vuint64m1_t test_vloxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { +vuint64m2_t test_vloxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { +vuint64m4_t test_vloxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { +vuint64m8_t test_vloxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +vuint64m1_t test_vloxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { +vuint64m2_t test_vloxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { +vuint64m4_t test_vloxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { +vuint64m8_t test_vloxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t testuxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { +vfloat32mf2_t test_vloxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t testuxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { +vfloat32m1_t test_vloxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* 
[[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t testuxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { +vfloat32m2_t test_vloxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t testuxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { +vfloat32m4_t test_vloxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t testuxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { +vfloat32m8_t test_vloxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) { return vloxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t testuxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { +vfloat32mf2_t test_vloxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t testuxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { +vfloat32m1_t test_vloxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* 
[[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t testuxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { +vfloat32m2_t test_vloxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t testuxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { +vfloat32m4_t test_vloxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t testuxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { +vfloat32m8_t test_vloxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) { return vloxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t testuxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { +vfloat32mf2_t test_vloxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t testuxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { +vfloat32m1_t test_vloxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t testuxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { +vfloat32m2_t test_vloxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t testuxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { +vfloat32m4_t test_vloxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f32m8_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m8_t testuxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { +vfloat32m8_t test_vloxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) { return vloxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f32mf2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t testuxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { +vfloat32mf2_t test_vloxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f32m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t testuxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { +vfloat32m1_t test_vloxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) { return vloxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f32m2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m2_t testuxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { +vfloat32m2_t test_vloxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) { return vloxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f32m4_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m4_t testuxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { +vfloat32m4_t test_vloxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) { return vloxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t testuxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { +vfloat64m1_t test_vloxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) { return vloxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t testuxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { +vfloat64m2_t test_vloxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) { return vloxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t testuxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { +vfloat64m4_t test_vloxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) { return vloxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f64m8_m( +// CHECK-RV64-LABEL: @test_vloxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] 
= bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t testuxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { +vfloat64m8_t test_vloxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) { return vloxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t testuxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { +vfloat64m1_t test_vloxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) { return vloxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t testuxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { +vfloat64m2_t test_vloxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) { return vloxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t testuxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { +vfloat64m4_t test_vloxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) { return vloxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_f64m8_m( +// CHECK-RV64-LABEL: @test_vloxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t testuxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { +vfloat64m8_t test_vloxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) { return vloxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t testuxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { +vfloat64m1_t test_vloxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) { return vloxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m2_t testuxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { +vfloat64m2_t test_vloxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) { return vloxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f64m4_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m4_t testuxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { +vfloat64m4_t test_vloxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) { return vloxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_f64m8_m( +// CHECK-RV64-LABEL: @test_vloxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m8_t testuxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { +vfloat64m8_t test_vloxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) { return vloxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f64m1_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat64m1_t testuxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { +vfloat64m1_t test_vloxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) { return vloxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_f64m2_m( +// CHECK-RV64-LABEL: @test_vloxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat64m2_t testuxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+vfloat64m2_t test_vloxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
   return vloxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat64m4_t testuxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+vfloat64m4_t test_vloxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
   return vloxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei64_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vloxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP1]]
 //
-vfloat64m8_t testuxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+vfloat64m8_t test_vloxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
   return vloxei64_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf4_t test_vloxei8_v_f16mf4 (const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf2_t test_vloxei8_v_f16mf2 (const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxei8_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m1_t test_vloxei8_v_f16m1 (const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxei8_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call
@llvm.riscv.vloxei.nxv8f16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m2_t test_vloxei8_v_f16m2 (const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxei8_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m4_t test_vloxei8_v_f16m4 (const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vloxei8_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m8_t test_vloxei8_v_f16m8 (const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vloxei8_v_f16m8 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf4_t test_vloxei16_v_f16mf4 (const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxei16_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf2_t test_vloxei16_v_f16mf2 (const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxei16_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m1_t test_vloxei16_v_f16m1 (const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxei16_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m2_t test_vloxei16_v_f16m2 (const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxei16_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m4_t test_vloxei16_v_f16m4 (const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vloxei16_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+//
CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv32f16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m8_t test_vloxei16_v_f16m8 (const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+  return vloxei16_v_f16m8 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf4_t test_vloxei32_v_f16mf4 (const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxei32_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf2_t test_vloxei32_v_f16mf2 (const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxei32_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m1_t test_vloxei32_v_f16m1 (const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxei32_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m2_t test_vloxei32_v_f16m2 (const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxei32_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv16f16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m4_t test_vloxei32_v_f16m4 (const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vloxei32_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv1f16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf4_t test_vloxei64_v_f16mf4 (const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxei64_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv2f16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf2_t test_vloxei64_v_f16mf2 (const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxei64_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv4f16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m1_t test_vloxei64_v_f16m1 (const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxei64_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.nxv8f16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m2_t test_vloxei64_v_f16m2 (const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf4_t test_vloxei8_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vloxei8_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf2_t test_vloxei8_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vloxei8_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m1_t test_vloxei8_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vloxei8_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m2_t test_vloxei8_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vloxei8_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m4_t test_vloxei8_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vloxei8_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL:
@test_vloxei8_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m8_t test_vloxei8_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vloxei8_v_f16m8_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf4_t test_vloxei16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vloxei16_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf2_t test_vloxei16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vloxei16_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m1_t test_vloxei16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vloxei16_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m2_t test_vloxei16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vloxei16_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m4_t test_vloxei16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vloxei16_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]],
[[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m8_t test_vloxei16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+  return vloxei16_v_f16m8_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf4_t test_vloxei32_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxei32_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf2_t test_vloxei32_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vloxei32_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m1_t test_vloxei32_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vloxei32_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m2_t test_vloxei32_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vloxei32_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m4_t test_vloxei32_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vloxei32_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf4_t test_vloxei64_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vloxei64_v_f16mf4_m
(mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16mf2_t test_vloxei64_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vloxei64_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m1_t test_vloxei64_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vloxei64_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP1]]
+//
+vfloat16m2_t test_vloxei64_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vloxei64_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlse.c
@@ -1,11 +1,11 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

 #include

-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -17,7 +17,6 @@
   return vlse8_v_i8mf8(base, bstride, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -29,7 +28,6 @@
   return vlse8_v_i8mf4(base, bstride, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -41,7 +39,6 @@
   return vlse8_v_i8mf2(base, bstride, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -52,7 +49,6 @@
   return vlse8_v_i8m1(base, bstride, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -63,7 +59,6 @@
   return vlse8_v_i8m2(base, bstride, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vlse8_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -74,7
+69,6 @@ return vlse8_v_i8m4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -85,7 +79,6 @@ return vlse8_v_i8m8(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -97,7 +90,6 @@ return vlse16_v_i16mf4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -109,7 +101,6 @@ return vlse16_v_i16mf2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -121,7 +112,6 @@ return vlse16_v_i16m1(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -133,7 +123,6 @@ return vlse16_v_i16m2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -145,7 +134,6 @@ return vlse16_v_i16m4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -157,7 +145,6 @@ return vlse16_v_i16m8(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -169,7 +156,6 @@ return vlse32_v_i32mf2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -181,7 +167,6 @@ return vlse32_v_i32m1(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -193,7 +178,6 @@ return vlse32_v_i32m2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -205,7 +189,6 @@ return vlse32_v_i32m4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -217,7 +200,6 @@ return vlse32_v_i32m8(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -229,7 +211,6 @@ return vlse64_v_i64m1(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -241,7 +222,6 @@ return vlse64_v_i64m2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -253,7 +233,6 @@ return vlse64_v_i64m4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -265,7 +244,6 @@ return vlse64_v_i64m8(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -277,7 +255,6 @@ return vlse8_v_u8mf8(base, bstride, vl); } -// // CHECK-RV64-LABEL: 
@test_vlse8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -289,7 +266,6 @@ return vlse8_v_u8mf4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -301,7 +277,6 @@ return vlse8_v_u8mf2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -313,7 +288,6 @@ return vlse8_v_u8m1(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -325,7 +299,6 @@ return vlse8_v_u8m2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -337,7 +310,6 @@ return vlse8_v_u8m4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -349,7 +321,6 @@ return vlse8_v_u8m8(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -361,7 +332,6 @@ return vlse16_v_u16mf4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -373,7 +343,6 @@ return vlse16_v_u16mf2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -385,7 +354,6 @@ return vlse16_v_u16m1(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -397,7 +365,6 @@ return vlse16_v_u16m2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -409,7 +376,6 @@ return vlse16_v_u16m4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -421,7 +387,6 @@ return vlse16_v_u16m8(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -433,7 +398,6 @@ return vlse32_v_u32mf2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -445,7 +409,6 @@ return vlse32_v_u32m1(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -457,7 +420,6 @@ return vlse32_v_u32m2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -469,7 +431,6 @@ return vlse32_v_u32m4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -481,7 +442,6 @@ return vlse32_v_u32m8(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* 
[[BASE:%.*]] to * @@ -493,7 +453,6 @@ return vlse64_v_u64m1(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -505,7 +464,6 @@ return vlse64_v_u64m2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -517,7 +475,6 @@ return vlse64_v_u64m4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -529,7 +486,6 @@ return vlse64_v_u64m8(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -541,7 +497,6 @@ return vlse32_v_f32mf2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -553,7 +508,6 @@ return vlse32_v_f32m1(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -565,7 +519,6 @@ return vlse32_v_f32m2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -577,7 +530,6 @@ return vlse32_v_f32m4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -589,7 +541,6 @@ return vlse32_v_f32m8(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -601,7 +552,6 @@ return vlse64_v_f64m1(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -613,7 +563,6 @@ return vlse64_v_f64m2(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -625,7 +574,6 @@ return vlse64_v_f64m4(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -637,7 +585,6 @@ return vlse64_v_f64m8(base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -650,7 +597,6 @@ return vlse8_v_i8mf8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -663,7 +609,6 @@ return vlse8_v_i8mf4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -676,7 +621,6 @@ return vlse8_v_i8mf2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -689,7 +633,6 @@ return vlse8_v_i8m1_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i8* [[BASE:%.*]] to * @@ -702,7 +645,6 @@ return vlse8_v_i8m2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -715,7 +657,6 @@ return vlse8_v_i8m4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -728,7 +669,6 @@ return vlse8_v_i8m8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -741,7 +681,6 @@ return vlse16_v_i16mf4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -754,7 +693,6 @@ return vlse16_v_i16mf2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -767,7 +705,6 @@ return vlse16_v_i16m1_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -780,7 +717,6 @@ return vlse16_v_i16m2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -793,7 +729,6 @@ return vlse16_v_i16m4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -806,7 +741,6 @@ return vlse16_v_i16m8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -819,7 +753,6 @@ return vlse32_v_i32mf2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -832,7 +765,6 @@ return vlse32_v_i32m1_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -845,7 +777,6 @@ return vlse32_v_i32m2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -858,7 +789,6 @@ return vlse32_v_i32m4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -871,7 +801,6 @@ return vlse32_v_i32m8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -884,7 +813,6 @@ return vlse64_v_i64m1_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -897,7 +825,6 @@ return vlse64_v_i64m2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_i64m4_m( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -910,7 +837,6 @@ return vlse64_v_i64m4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -923,7 +849,6 @@ return vlse64_v_i64m8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -936,7 +861,6 @@ return vlse8_v_u8mf8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -949,7 +873,6 @@ return vlse8_v_u8mf4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -962,7 +885,6 @@ return vlse8_v_u8mf2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -975,7 +897,6 @@ return vlse8_v_u8m1_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -988,7 +909,6 @@ return vlse8_v_u8m2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1001,7 +921,6 @@ return vlse8_v_u8m4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1014,7 +933,6 @@ return vlse8_v_u8m8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1027,7 +945,6 @@ return vlse16_v_u16mf4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1040,7 +957,6 @@ return vlse16_v_u16mf2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1053,7 +969,6 @@ return vlse16_v_u16m1_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1066,7 +981,6 @@ return vlse16_v_u16m2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1079,7 +993,6 @@ return vlse16_v_u16m4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1092,7 +1005,6 @@ return vlse16_v_u16m8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1105,7 +1017,6 @@ return vlse32_v_u32mf2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: 
@test_vlse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1118,7 +1029,6 @@ return vlse32_v_u32m1_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1131,7 +1041,6 @@ return vlse32_v_u32m2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1144,7 +1053,6 @@ return vlse32_v_u32m4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1157,7 +1065,6 @@ return vlse32_v_u32m8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1170,7 +1077,6 @@ return vlse64_v_u64m1_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1183,7 +1089,6 @@ return vlse64_v_u64m2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1196,7 +1101,6 @@ return vlse64_v_u64m4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1209,7 +1113,6 @@ return vlse64_v_u64m8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1222,7 +1125,6 @@ return vlse32_v_f32mf2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1235,7 +1137,6 @@ return vlse32_v_f32m1_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1248,7 +1149,6 @@ return vlse32_v_f32m2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1261,7 +1161,6 @@ return vlse32_v_f32m4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1274,7 +1173,6 @@ return vlse32_v_f32m8_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1287,7 +1185,6 @@ return vlse64_v_f64m1_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1300,7 +1197,6 @@ return vlse64_v_f64m2_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1313,7 
+1209,6 @@ return vlse64_v_f64m4_m(mask, maskedoff, base, bstride, vl); } -// // CHECK-RV64-LABEL: @test_vlse64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
@@ -1325,3 +1220,123 @@
 size_t vl) {
 return vlse64_v_f64m8_m(mask, maskedoff, base, bstride, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv1f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vlse16_v_f16mf4 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16mf4 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv2f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vlse16_v_f16mf2 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16mf2 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv4f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vlse16_v_f16m1 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m1 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv8f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vlse16_v_f16m2 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m2 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv16f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m4_t test_vlse16_v_f16m4 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m4 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.nxv32f16.i64(* [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m8_t test_vlse16_v_f16m8 (const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m8 (base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv1f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vlse16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16mf4_m (mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv2f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vlse16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16mf2_m (mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv4f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vlse16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m1_m (mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv8f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vlse16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m2_m (mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv16f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m4_t test_vlse16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m4_m (mask, maskedoff, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlse16_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vlse.mask.nxv32f16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m8_t test_vlse16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
+  return vlse16_v_f16m8_m (mask, maskedoff, base, bstride, vl);
+}
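A minimal usage sketch of the strided _Float16 loads added above — again not part of the patch, assuming the same +experimental-v/+experimental-zfh configuration; load_alternate_f16 is a hypothetical helper name:

#include <riscv_vector.h>

/* Strided load: reads vl _Float16 elements starting at base, stepping
   bstride bytes between consecutive elements. */
vfloat16m1_t load_alternate_f16(const _Float16 *base, size_t vl) {
  ptrdiff_t bstride = 2 * (ptrdiff_t)sizeof(_Float16); /* every other element */
  return vlse16_v_f16m1(base, bstride, vl);
}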
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxei.c
@@ -1,4208 +1,4248 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8mf8_t testuxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+vint8mf8_t test_vluxei8_v_i8mf8(const int8_t *base, vuint8mf8_t bindex, size_t vl) {
 return vluxei8_v_i8mf8(base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8mf4_t testuxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+vint8mf4_t test_vluxei8_v_i8mf4(const int8_t *base, vuint8mf4_t bindex, size_t vl) {
 return vluxei8_v_i8mf4(base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8mf2_t testuxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+vint8mf2_t test_vluxei8_v_i8mf2(const int8_t *base, vuint8mf2_t bindex, size_t vl) {
 return vluxei8_v_i8mf2(base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8m1_t testuxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
+vint8m1_t test_vluxei8_v_i8m1(const int8_t *base, vuint8m1_t bindex, size_t vl) {
 return vluxei8_v_i8m1(base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8m2_t testuxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
+vint8m2_t test_vluxei8_v_i8m2(const int8_t *base, vuint8m2_t bindex, size_t vl) {
 return vluxei8_v_i8m2(base, bindex, vl);
 }

-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vint8m4_t testuxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
+vint8m4_t test_vluxei8_v_i8m4(const int8_t *base, vuint8m4_t bindex, size_t vl) {
 return vluxei8_v_i8m4(base, bindex, vl);
 }

-// -// CHECK-RV64-LABEL: @testuxei8_v_i8m8( +// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m8_t testuxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { +vint8m8_t
test_vluxei8_v_i8m8(const int8_t *base, vuint8m8_t bindex, size_t vl) { return vluxei8_v_i8m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8mf8( +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t testuxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { +vint8mf8_t test_vluxei16_v_i8mf8(const int8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_i8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8mf4( +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t testuxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { +vint8mf4_t test_vluxei16_v_i8mf4(const int8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_i8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8mf2( +// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t testuxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { +vint8mf2_t test_vluxei16_v_i8mf2(const int8_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_i8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8m1( +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t testuxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { +vint8m1_t test_vluxei16_v_i8m1(const int8_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_i8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8m2( +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t testuxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { +vint8m2_t test_vluxei16_v_i8m2(const int8_t *base, vuint16m4_t bindex, size_t vl) { return vluxei16_v_i8m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i8m4( +// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m4_t testuxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { +vint8m4_t test_vluxei16_v_i8m4(const int8_t *base, vuint16m8_t bindex, size_t vl) { return vluxei16_v_i8m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8mf8( +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t testuxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { +vint8mf8_t test_vluxei32_v_i8mf8(const int8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_i8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8mf4( +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t testuxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { +vint8mf4_t test_vluxei32_v_i8mf4(const int8_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_i8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8mf2( +// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t testuxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { +vint8mf2_t test_vluxei32_v_i8mf2(const int8_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_i8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8m1( +// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t testuxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { +vint8m1_t test_vluxei32_v_i8m1(const int8_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_i8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i8m2( +// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m2_t testuxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { +vint8m2_t test_vluxei32_v_i8m2(const int8_t *base, vuint32m8_t bindex, size_t vl) { return vluxei32_v_i8m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8mf8( +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf8_t testuxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { +vint8mf8_t test_vluxei64_v_i8mf8(const int8_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_i8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8mf4( +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf4_t testuxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { +vint8mf4_t test_vluxei64_v_i8mf4(const int8_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_i8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8mf2( +// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8mf2_t testuxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { +vint8mf2_t test_vluxei64_v_i8mf2(const int8_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_i8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i8m1( +// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint8m1_t testuxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { +vint8m1_t test_vluxei64_v_i8m1(const int8_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_i8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16mf4( +// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { +vint16mf4_t test_vluxei8_v_i16mf4(const int16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_i16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16mf2( +// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { +vint16mf2_t test_vluxei8_v_i16mf2(const int16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_i16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m1( +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { +vint16m1_t test_vluxei8_v_i16m1(const int16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_i16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m2( +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t vl) { +vint16m2_t test_vluxei8_v_i16m2(const int16_t *base, vuint8m1_t bindex, size_t 
vl) { return vluxei8_v_i16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m4( +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t testuxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { +vint16m4_t test_vluxei8_v_i16m4(const int16_t *base, vuint8m2_t bindex, size_t vl) { return vluxei8_v_i16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i16m8( +// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t testuxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { +vint16m8_t test_vluxei8_v_i16m8(const int16_t *base, vuint8m4_t bindex, size_t vl) { return vluxei8_v_i16m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16mf4( +// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { +vint16mf4_t test_vluxei16_v_i16mf4(const int16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_i16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16mf2( +// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { +vint16mf2_t test_vluxei16_v_i16mf2(const int16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_i16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m1( +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { +vint16m1_t test_vluxei16_v_i16m1(const int16_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_i16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m2( +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { +vint16m2_t test_vluxei16_v_i16m2(const int16_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_i16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m4( +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4( // CHECK-RV64-NEXT: entry: 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t testuxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { +vint16m4_t test_vluxei16_v_i16m4(const int16_t *base, vuint16m4_t bindex, size_t vl) { return vluxei16_v_i16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i16m8( +// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m8_t testuxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { +vint16m8_t test_vluxei16_v_i16m8(const int16_t *base, vuint16m8_t bindex, size_t vl) { return vluxei16_v_i16m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16mf4( +// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { +vint16mf4_t test_vluxei32_v_i16mf4(const int16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_i16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16mf2( +// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { +vint16mf2_t test_vluxei32_v_i16mf2(const int16_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_i16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16m1( +// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { +vint16m1_t test_vluxei32_v_i16m1(const int16_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_i16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16m2( +// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { +vint16m2_t test_vluxei32_v_i16m2(const int16_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_i16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i16m4( +// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(* 
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m4_t testuxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { +vint16m4_t test_vluxei32_v_i16m4(const int16_t *base, vuint32m8_t bindex, size_t vl) { return vluxei32_v_i16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16mf4( +// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf4_t testuxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { +vint16mf4_t test_vluxei64_v_i16mf4(const int16_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_i16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16mf2( +// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16mf2_t testuxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { +vint16mf2_t test_vluxei64_v_i16mf2(const int16_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_i16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16m1( +// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m1_t testuxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { +vint16m1_t test_vluxei64_v_i16m1(const int16_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_i16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i16m2( +// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint16m2_t testuxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { +vint16m2_t test_vluxei64_v_i16m2(const int16_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_i16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32mf2( +// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { +vint32mf2_t test_vluxei8_v_i32mf2(const int32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_i32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m1( +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { 
+vint32m1_t test_vluxei8_v_i32m1(const int32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_i32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m2( +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { +vint32m2_t test_vluxei8_v_i32m2(const int32_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_i32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m4( +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { +vint32m4_t test_vluxei8_v_i32m4(const int32_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_i32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i32m8( +// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t testuxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { +vint32m8_t test_vluxei8_v_i32m8(const int32_t *base, vuint8m2_t bindex, size_t vl) { return vluxei8_v_i32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32mf2( +// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { +vint32mf2_t test_vluxei16_v_i32mf2(const int32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_i32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m1( +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { +vint32m1_t test_vluxei16_v_i32m1(const int32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_i32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m2( +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { +vint32m2_t test_vluxei16_v_i32m2(const int32_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_i32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m4( +// 
CHECK-RV64-LABEL: @test_vluxei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { +vint32m4_t test_vluxei16_v_i32m4(const int32_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_i32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i32m8( +// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t testuxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { +vint32m8_t test_vluxei16_v_i32m8(const int32_t *base, vuint16m4_t bindex, size_t vl) { return vluxei16_v_i32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32mf2( +// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { +vint32mf2_t test_vluxei32_v_i32mf2(const int32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_i32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m1( +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { +vint32m1_t test_vluxei32_v_i32m1(const int32_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_i32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m2( +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { +vint32m2_t test_vluxei32_v_i32m2(const int32_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_i32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m4( +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { +vint32m4_t test_vluxei32_v_i32m4(const int32_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_i32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i32m8( +// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: 
[[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m8_t testuxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { +vint32m8_t test_vluxei32_v_i32m8(const int32_t *base, vuint32m8_t bindex, size_t vl) { return vluxei32_v_i32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32mf2( +// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32mf2_t testuxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { +vint32mf2_t test_vluxei64_v_i32mf2(const int32_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_i32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32m1( +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m1_t testuxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { +vint32m1_t test_vluxei64_v_i32m1(const int32_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_i32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32m2( +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m2_t testuxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { +vint32m2_t test_vluxei64_v_i32m2(const int32_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_i32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i32m4( +// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint32m4_t testuxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { +vint32m4_t test_vluxei64_v_i32m4(const int32_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_i32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m1( +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { +vint64m1_t test_vluxei8_v_i64m1(const int64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_i64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m2( +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei8_v_i64m2(const 
int64_t *base, vuint8mf4_t bindex, size_t vl) { +vint64m2_t test_vluxei8_v_i64m2(const int64_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_i64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m4( +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { +vint64m4_t test_vluxei8_v_i64m4(const int64_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_i64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_i64m8( +// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { +vint64m8_t test_vluxei8_v_i64m8(const int64_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_i64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m1( +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { +vint64m1_t test_vluxei16_v_i64m1(const int64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_i64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m2( +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { +vint64m2_t test_vluxei16_v_i64m2(const int64_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_i64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m4( +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { +vint64m4_t test_vluxei16_v_i64m4(const int64_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_i64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_i64m8( +// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { +vint64m8_t test_vluxei16_v_i64m8(const int64_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_i64m8(base, bindex, vl); } -// -// 
CHECK-RV64-LABEL: @testuxei32_v_i64m1( +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { +vint64m1_t test_vluxei32_v_i64m1(const int64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_i64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i64m2( +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { +vint64m2_t test_vluxei32_v_i64m2(const int64_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_i64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i64m4( +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { +vint64m4_t test_vluxei32_v_i64m4(const int64_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_i64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_i64m8( +// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { +vint64m8_t test_vluxei32_v_i64m8(const int64_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_i64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m1( +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m1_t testuxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { +vint64m1_t test_vluxei64_v_i64m1(const int64_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_i64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m2( +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m2_t testuxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { +vint64m2_t test_vluxei64_v_i64m2(const int64_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_i64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m4( +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] 
to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m4_t testuxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { +vint64m4_t test_vluxei64_v_i64m4(const int64_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_i64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_i64m8( +// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vint64m8_t testuxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { +vint64m8_t test_vluxei64_v_i64m8(const int64_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_i64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8mf8( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { +vuint8mf8_t test_vluxei8_v_u8mf8(const uint8_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_u8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8mf4( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +vuint8mf4_t test_vluxei8_v_u8mf4(const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_u8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +vuint8mf2_t test_vluxei8_v_u8mf2(const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_u8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m1( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { +vuint8m1_t test_vluxei8_v_u8m1(const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_u8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m2( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei8_v_u8m2(const uint8_t *base, 
vuint8m2_t bindex, size_t vl) { +vuint8m2_t test_vluxei8_v_u8m2(const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vluxei8_v_u8m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m4( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t testuxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { +vuint8m4_t test_vluxei8_v_u8m4(const uint8_t *base, vuint8m4_t bindex, size_t vl) { return vluxei8_v_u8m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m8( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv64i8.nxv64i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t testuxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { +vuint8m8_t test_vluxei8_v_u8m8(const uint8_t *base, vuint8m8_t bindex, size_t vl) { return vluxei8_v_u8m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf8( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +vuint8mf8_t test_vluxei16_v_u8mf8(const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_u8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf4( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +vuint8mf4_t test_vluxei16_v_u8mf4(const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_u8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { +vuint8mf2_t test_vluxei16_v_u8mf2(const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_u8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m1( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { +vuint8m1_t test_vluxei16_v_u8m1(const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_u8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m2( 
+// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i8.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { +vuint8m2_t test_vluxei16_v_u8m2(const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vluxei16_v_u8m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m4( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i8.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t testuxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { +vuint8m4_t test_vluxei16_v_u8m4(const uint8_t *base, vuint16m8_t bindex, size_t vl) { return vluxei16_v_u8m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf8( +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +vuint8mf8_t test_vluxei32_v_u8mf8(const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_u8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf4( +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { +vuint8mf4_t test_vluxei32_v_u8mf4(const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_u8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { +vuint8mf2_t test_vluxei32_v_u8mf2(const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_u8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8m1( +// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { +vuint8m1_t test_vluxei32_v_u8m1(const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_u8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8m2( +// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv16i8.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { +vuint8m2_t test_vluxei32_v_u8m2(const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vluxei32_v_u8m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf8( +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i8.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { +vuint8mf8_t test_vluxei64_v_u8mf8(const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_u8mf8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf4( +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i8.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { +vuint8mf4_t test_vluxei64_v_u8mf4(const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_u8mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf2( +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i8.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { +vuint8mf2_t test_vluxei64_v_u8mf2(const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_u8mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8m1( +// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i8.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { +vuint8m1_t test_vluxei64_v_u8m1(const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_u8m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16mf4( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +vuint16mf4_t test_vluxei8_v_u16mf4(const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_u16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16mf2( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei8_v_u16mf2(const uint16_t *base, 
vuint8mf4_t bindex, size_t vl) { +vuint16mf2_t test_vluxei8_v_u16mf2(const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_u16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m1( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +vuint16m1_t test_vluxei8_v_u16m1(const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_u16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m2( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { +vuint16m2_t test_vluxei8_v_u16m2(const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_u16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m4( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { +vuint16m4_t test_vluxei8_v_u16m4(const uint16_t *base, vuint8m2_t bindex, size_t vl) { return vluxei8_v_u16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m8( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t testuxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { +vuint16m8_t test_vluxei8_v_u16m8(const uint16_t *base, vuint8m4_t bindex, size_t vl) { return vluxei8_v_u16m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16mf4( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +vuint16mf4_t test_vluxei16_v_u16mf4(const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_u16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16mf2( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +vuint16mf2_t test_vluxei16_v_u16mf2(const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_u16mf2(base, 
bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m1( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { +vuint16m1_t test_vluxei16_v_u16m1(const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_u16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m2( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { +vuint16m2_t test_vluxei16_v_u16m2(const uint16_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_u16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m4( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { +vuint16m4_t test_vluxei16_v_u16m4(const uint16_t *base, vuint16m4_t bindex, size_t vl) { return vluxei16_v_u16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m8( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32i16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t testuxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { +vuint16m8_t test_vluxei16_v_u16m8(const uint16_t *base, vuint16m8_t bindex, size_t vl) { return vluxei16_v_u16m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16mf4( +// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +vuint16mf4_t test_vluxei32_v_u16mf4(const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_u16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16mf2( +// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { +vuint16mf2_t test_vluxei32_v_u16mf2(const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_u16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m1( +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1( // CHECK-RV64-NEXT: 
entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { +vuint16m1_t test_vluxei32_v_u16m1(const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_u16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m2( +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { +vuint16m2_t test_vluxei32_v_u16m2(const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_u16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m4( +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { +vuint16m4_t test_vluxei32_v_u16m4(const uint16_t *base, vuint32m8_t bindex, size_t vl) { return vluxei32_v_u16m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16mf4( +// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { +vuint16mf4_t test_vluxei64_v_u16mf4(const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_u16mf4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16mf2( +// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { +vuint16mf2_t test_vluxei64_v_u16mf2(const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_u16mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16m1( +// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { +vuint16m1_t test_vluxei64_v_u16m1(const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_u16m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16m2( +// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call 
@llvm.riscv.vluxei.nxv8i16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { +vuint16m2_t test_vluxei64_v_u16m2(const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_u16m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32mf2( +// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +vuint32mf2_t test_vluxei8_v_u32mf2(const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_u32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m1( +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +vuint32m1_t test_vluxei8_v_u32m1(const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_u32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m2( +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +vuint32m2_t test_vluxei8_v_u32m2(const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_u32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m4( +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { +vuint32m4_t test_vluxei8_v_u32m4(const uint32_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_u32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m8( +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { +vuint32m8_t test_vluxei8_v_u32m8(const uint32_t *base, vuint8m2_t bindex, size_t vl) { return vluxei8_v_u32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32mf2( +// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei16_v_u32mf2(const uint32_t *base, 
vuint16mf4_t bindex, size_t vl) { +vuint32mf2_t test_vluxei16_v_u32mf2(const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_u32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m1( +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +vuint32m1_t test_vluxei16_v_u32m1(const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_u32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m2( +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { +vuint32m2_t test_vluxei16_v_u32m2(const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_u32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m4( +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { +vuint32m4_t test_vluxei16_v_u32m4(const uint32_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_u32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m8( +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { +vuint32m8_t test_vluxei16_v_u32m8(const uint32_t *base, vuint16m4_t bindex, size_t vl) { return vluxei16_v_u32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32mf2( +// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +vuint32mf2_t test_vluxei32_v_u32mf2(const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_u32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m1( +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { +vuint32m1_t test_vluxei32_v_u32m1(const uint32_t *base, vuint32m1_t bindex, size_t vl) { return 
vluxei32_v_u32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m2( +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { +vuint32m2_t test_vluxei32_v_u32m2(const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_u32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m4( +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { +vuint32m4_t test_vluxei32_v_u32m4(const uint32_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_u32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m8( +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16i32.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { +vuint32m8_t test_vluxei32_v_u32m8(const uint32_t *base, vuint32m8_t bindex, size_t vl) { return vluxei32_v_u32m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32mf2( +// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i32.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { +vuint32mf2_t test_vluxei64_v_u32mf2(const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_u32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m1( +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i32.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { +vuint32m1_t test_vluxei64_v_u32m1(const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_u32m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m2( +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i32.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { +vuint32m2_t test_vluxei64_v_u32m2(const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_u32m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m4( +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i32.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { +vuint32m4_t test_vluxei64_v_u32m4(const uint32_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_u32m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m1( +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +vuint64m1_t test_vluxei8_v_u64m1(const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_u64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m2( +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +vuint64m2_t test_vluxei8_v_u64m2(const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_u64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m4( +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { +vuint64m4_t test_vluxei8_v_u64m4(const uint64_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_u64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m8( +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { +vuint64m8_t test_vluxei8_v_u64m8(const uint64_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_u64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m1( +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { +vuint64m1_t test_vluxei16_v_u64m1(const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_u64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m2( +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i16.i64(* 
[[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +vuint64m2_t test_vluxei16_v_u64m2(const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_u64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m4( +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { +vuint64m4_t test_vluxei16_v_u64m4(const uint64_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_u64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m8( +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { +vuint64m8_t test_vluxei16_v_u64m8(const uint64_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_u64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m1( +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +vuint64m1_t test_vluxei32_v_u64m1(const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_u64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m2( +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { +vuint64m2_t test_vluxei32_v_u64m2(const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_u64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m4( +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { +vuint64m4_t test_vluxei32_v_u64m4(const uint64_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_u64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m8( +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei32_v_u64m8(const uint64_t *base, vuint32m4_t 
bindex, size_t vl) { +vuint64m8_t test_vluxei32_v_u64m8(const uint64_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_u64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m1( +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1i64.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { +vuint64m1_t test_vluxei64_v_u64m1(const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_u64m1(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m2( +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2i64.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { +vuint64m2_t test_vluxei64_v_u64m2(const uint64_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_u64m2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m4( +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4i64.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { +vuint64m4_t test_vluxei64_v_u64m4(const uint64_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_u64m4(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m8( +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8i64.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { +vuint64m8_t test_vluxei64_v_u64m8(const uint64_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_u64m8(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32mf2( +// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f32.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32mf2_t testuxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { +vfloat32mf2_t test_vluxei8_v_f32mf2(const float *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_f32mf2(base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_f32m1( +// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f32.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vfloat32m1_t testuxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { +vfloat32m1_t test_vluxei8_v_f32m1(const float *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_f32m1(base, bindex, vl); } -// 
-// CHECK-RV64-LABEL: @testuxei8_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei8_v_f32m2(const float *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_f32m2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f32m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei8_v_f32m4(const float *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_f32m4(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f32m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei8_v_f32m8(const float *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_f32m8(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei16_v_f32mf2(const float *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_f32mf2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei16_v_f32m1(const float *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_f32m1(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei16_v_f32m2(const float *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_f32m2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f32m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei16_v_f32m4(const float *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_f32m4(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f32m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei16_v_f32m8(const float *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_f32m8(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei32_v_f32mf2(const float *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_f32mf2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei32_v_f32m1(const float *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_f32m1(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei32_v_f32m2(const float *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_f32m2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f32m4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei32_v_f32m4(const float *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_f32m4(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f32m8(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32.i64(<vscale x 16 x float>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP1]]
 //
-vfloat32m8_t testuxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei32_v_f32m8(const float *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_f32m8(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f32mf2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64.i64(<vscale x 1 x float>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP1]]
 //
-vfloat32mf2_t testuxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei64_v_f32mf2(const float *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_f32mf2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f32m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64.i64(<vscale x 2 x float>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP1]]
 //
-vfloat32m1_t testuxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei64_v_f32m1(const float *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_f32m1(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f32m2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64.i64(<vscale x 4 x float>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP1]]
 //
-vfloat32m2_t testuxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei64_v_f32m2(const float *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_f32m2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f32m4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64.i64(<vscale x 8 x float>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP1]]
 //
-vfloat32m4_t testuxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei64_v_f32m4(const float *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_f32m4(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei8_v_f64m1(const double *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_f64m1(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei8_v_f64m2(const double *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_f64m2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei8_v_f64m4(const double *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_f64m4(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f64m8(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei8_v_f64m8(const double *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_f64m8(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei16_v_f64m1(const double *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_f64m1(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei16_v_f64m2(const double *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_f64m2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei16_v_f64m4(const double *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_f64m4(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f64m8(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei16_v_f64m8(const double *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_f64m8(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei32_v_f64m1(const double *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_f64m1(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei32_v_f64m2(const double *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_f64m2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei32_v_f64m4(const double *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_f64m4(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f64m8(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei32_v_f64m8(const double *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_f64m8(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f64m1(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64.i64(<vscale x 1 x double>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP1]]
 //
-vfloat64m1_t testuxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei64_v_f64m1(const double *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_f64m1(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f64m2(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64.i64(<vscale x 2 x double>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
-vfloat64m2_t testuxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei64_v_f64m2(const double *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_f64m2(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f64m4(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64.i64(<vscale x 4 x double>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP1]]
 //
-vfloat64m4_t testuxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei64_v_f64m4(const double *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_f64m4(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f64m8(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64.i64(<vscale x 8 x double>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP1]]
 //
-vfloat64m8_t testuxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei64_v_f64m8(const double *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_f64m8(base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
+vint8mf8_t test_vluxei8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
+vint8mf4_t test_vluxei8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
+vint8mf2_t test_vluxei8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
+vint8m1_t test_vluxei8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
+vint8m2_t test_vluxei8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vint8m4_t testuxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
+vint8m4_t test_vluxei8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i8m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i8> [[BINDEX:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
 //
-vint8m8_t testuxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
+vint8m8_t test_vluxei8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, vuint8m8_t bindex, size_t vl) {
   return vluxei8_v_i8m8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
+vint8mf8_t test_vluxei16_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
+vint8mf4_t test_vluxei16_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
+vint8mf2_t test_vluxei16_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
+vint8m1_t test_vluxei16_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
+vint8m2_t test_vluxei16_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i8m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
 //
-vint8m4_t testuxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
+vint8m4_t test_vluxei16_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_i8m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
+vint8mf8_t test_vluxei32_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
+vint8mf4_t test_vluxei32_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
+vint8mf2_t test_vluxei32_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
+vint8m1_t test_vluxei32_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i8m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
 //
-vint8m2_t testuxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
+vint8m2_t test_vluxei32_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i8m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vint8mf8_t testuxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
+vint8mf8_t test_vluxei64_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i8mf8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
 //
-vint8mf4_t testuxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
+vint8mf4_t test_vluxei64_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i8mf4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i8mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
 //
-vint8mf2_t testuxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
+vint8mf2_t test_vluxei64_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i8mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i8m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
 //
-vint8m1_t testuxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
+vint8m1_t test_vluxei64_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i8m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
+vint16mf4_t test_vluxei8_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
+vint16mf2_t test_vluxei8_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
+vint16m1_t test_vluxei8_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
+vint16m2_t test_vluxei8_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
+vint16m4_t test_vluxei8_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i16m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vint16m8_t testuxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
+vint16m8_t test_vluxei8_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint8m4_t bindex, size_t vl) {
   return vluxei8_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
+vint16mf4_t test_vluxei16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
+vint16mf2_t test_vluxei16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
+vint16m1_t test_vluxei16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
+vint16m2_t test_vluxei16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
+vint16m4_t test_vluxei16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i16m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP1]]
 //
-vint16m8_t testuxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
+vint16m8_t test_vluxei16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, vuint16m8_t bindex, size_t vl) {
   return vluxei16_v_i16m8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
+vint16mf4_t test_vluxei32_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
+vint16mf2_t test_vluxei32_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
+vint16m1_t test_vluxei32_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
+vint16m2_t test_vluxei32_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i16m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP1]]
 //
-vint16m4_t testuxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
+vint16m4_t test_vluxei32_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i16m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i16mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
 //
-vint16mf4_t testuxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
+vint16mf4_t test_vluxei64_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i16mf4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i16mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
 //
-vint16mf2_t testuxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
+vint16mf2_t test_vluxei64_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i16mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i16m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
 //
-vint16m1_t testuxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
+vint16m1_t test_vluxei64_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i16m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i16m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
 //
-vint16m2_t testuxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
+vint16m2_t test_vluxei64_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i16m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
+vint32mf2_t test_vluxei8_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
+vint32m1_t test_vluxei8_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
+vint32m2_t test_vluxei8_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
+vint32m4_t test_vluxei8_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
+vint32m8_t test_vluxei8_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
+vint32mf2_t test_vluxei16_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
+vint32m1_t test_vluxei16_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
+vint32m2_t test_vluxei16_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
+vint32m4_t test_vluxei16_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
+vint32m8_t test_vluxei16_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+vint32mf2_t test_vluxei32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
+vint32m1_t test_vluxei32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
+vint32m2_t test_vluxei32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
+vint32m4_t test_vluxei32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP1]]
 //
-vint32m8_t testuxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
+vint32m8_t test_vluxei32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_i32m8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP1]]
 //
-vint32mf2_t testuxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
+vint32mf2_t test_vluxei64_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i32mf2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP1]]
 //
-vint32m1_t testuxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
+vint32m1_t test_vluxei64_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i32m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
-vint32m2_t testuxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
+vint32m2_t test_vluxei64_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i32m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP1]]
 //
-vint32m4_t testuxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
+vint32m4_t test_vluxei64_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i32m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
+vint64m1_t test_vluxei8_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
+vint64m2_t test_vluxei8_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
+vint64m4_t test_vluxei8_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
+vint64m8_t test_vluxei8_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
+vint64m1_t test_vluxei16_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
+vint64m2_t test_vluxei16_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
+vint64m4_t test_vluxei16_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei16_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
+vint64m8_t test_vluxei16_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
+vint64m1_t test_vluxei32_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
+vint64m2_t test_vluxei32_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
+vint64m4_t test_vluxei32_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei32_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
+vint64m8_t test_vluxei32_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP1]]
 //
-vint64m1_t testuxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
+vint64m1_t test_vluxei64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_i64m1_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
 //
-vint64m2_t testuxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
+vint64m2_t test_vluxei64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_i64m2_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP1]]
 //
-vint64m4_t testuxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
+vint64m4_t test_vluxei64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_i64m4_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei64_v_i64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP1]]
 //
-vint64m8_t testuxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
+vint64m8_t test_vluxei64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_i64m8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
 //
-vuint8mf8_t testuxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
+vuint8mf8_t test_vluxei8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_u8mf8_m(mask, maskedoff, base, bindex, vl);
 }
-//
-// CHECK-RV64-LABEL: @testuxei8_v_u8mf4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
 // CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret
[[TMP1]] // -vuint8mf4_t testuxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { +vuint8mf4_t test_vluxei8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { +vuint8mf2_t test_vluxei8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { +vuint8m1_t test_vluxei8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_u8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { +vuint8m2_t test_vluxei8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint8m2_t bindex, size_t vl) { return vluxei8_v_u8m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t testuxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { +vuint8m4_t test_vluxei8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint8m4_t bindex, size_t vl) { return vluxei8_v_u8m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u8m8_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m8_t testuxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, 
const uint8_t *base, vuint8m8_t bindex, size_t vl) { +vuint8m8_t test_vluxei8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, vuint8m8_t bindex, size_t vl) { return vluxei8_v_u8m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { +vuint8mf8_t test_vluxei16_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { +vuint8mf4_t test_vluxei16_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { +vuint8mf2_t test_vluxei16_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { +vuint8m1_t test_vluxei16_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_u8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t 
bindex, size_t vl) { +vuint8m2_t test_vluxei16_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint16m4_t bindex, size_t vl) { return vluxei16_v_u8m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u8m4_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m4_t testuxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { +vuint8m4_t test_vluxei16_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, vuint16m8_t bindex, size_t vl) { return vluxei16_v_u8m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { +vuint8mf8_t test_vluxei32_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { +vuint8mf4_t test_vluxei32_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { +vuint8mf2_t test_vluxei32_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { +vuint8m1_t 
test_vluxei32_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_u8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u8m2_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m2_t testuxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { +vuint8m2_t test_vluxei32_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, vuint32m8_t bindex, size_t vl) { return vluxei32_v_u8m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf8_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf8_t testuxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { +vuint8mf8_t test_vluxei64_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_u8mf8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf4_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf4_t testuxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { +vuint8mf4_t test_vluxei64_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_u8mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8mf2_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8mf2_t testuxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { +vuint8mf2_t test_vluxei64_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_u8mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u8m1_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint8m1_t testuxei64_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { +vuint8m1_t test_vluxei64_v_u8m1_m(vbool8_t mask, 
vuint8m1_t maskedoff, const uint8_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_u8m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { +vuint16mf4_t test_vluxei8_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { +vuint16mf2_t test_vluxei8_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { +vuint16m1_t test_vluxei8_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_u16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { +vuint16m2_t test_vluxei8_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_u16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { +vuint16m4_t test_vluxei8_v_u16m4_m(vbool4_t mask, vuint16m4_t 
maskedoff, const uint16_t *base, vuint8m2_t bindex, size_t vl) { return vluxei8_v_u16m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u16m8_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t testuxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { +vuint16m8_t test_vluxei8_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint8m4_t bindex, size_t vl) { return vluxei8_v_u16m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { +vuint16mf4_t test_vluxei16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { +vuint16mf2_t test_vluxei16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { +vuint16m1_t test_vluxei16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_u16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { +vuint16m2_t test_vluxei16_v_u16m2_m(vbool8_t mask, 
vuint16m2_t maskedoff, const uint16_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_u16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { +vuint16m4_t test_vluxei16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint16m4_t bindex, size_t vl) { return vluxei16_v_u16m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u16m8_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m8_t testuxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { +vuint16m8_t test_vluxei16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, vuint16m8_t bindex, size_t vl) { return vluxei16_v_u16m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { +vuint16mf4_t test_vluxei32_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { +vuint16mf2_t test_vluxei32_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { +vuint16m1_t 
test_vluxei32_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_u16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { +vuint16m2_t test_vluxei32_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_u16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u16m4_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m4_t testuxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { +vuint16m4_t test_vluxei32_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, vuint32m8_t bindex, size_t vl) { return vluxei32_v_u16m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16mf4_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf4_t testuxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { +vuint16mf4_t test_vluxei64_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_u16mf4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16mf2_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16mf2_t testuxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { +vuint16mf2_t test_vluxei64_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_u16mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16m1_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m1_t testuxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t 
vl) { +vuint16m1_t test_vluxei64_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_u16m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u16m2_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint16m2_t testuxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { +vuint16m2_t test_vluxei64_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_u16m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { +vuint32mf2_t test_vluxei8_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { +vuint32m1_t test_vluxei8_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_u32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { +vuint32m2_t test_vluxei8_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_u32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { 
+vuint32m4_t test_vluxei8_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_u32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { +vuint32m8_t test_vluxei8_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint8m2_t bindex, size_t vl) { return vluxei8_v_u32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { +vuint32mf2_t test_vluxei16_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { +vuint32m1_t test_vluxei16_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_u32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { +vuint32m2_t test_vluxei16_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_u32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { 
+vuint32m4_t test_vluxei16_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_u32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { +vuint32m8_t test_vluxei16_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint16m4_t bindex, size_t vl) { return vluxei16_v_u32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { +vuint32mf2_t test_vluxei32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { +vuint32m1_t test_vluxei32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_u32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { +vuint32m2_t test_vluxei32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_u32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, 
size_t vl) { +vuint32m4_t test_vluxei32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_u32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u32m8_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m8_t testuxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { +vuint32m8_t test_vluxei32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, vuint32m8_t bindex, size_t vl) { return vluxei32_v_u32m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32mf2_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32mf2_t testuxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { +vuint32mf2_t test_vluxei64_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_u32mf2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m1_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m1_t testuxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { +vuint32m1_t test_vluxei64_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_u32m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m2_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m2_t testuxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { +vuint32m2_t test_vluxei64_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, vuint64m4_t bindex, size_t vl) { return vluxei64_v_u32m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u32m4_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint32m4_t testuxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t 
bindex, size_t vl) { +vuint32m4_t test_vluxei64_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, vuint64m8_t bindex, size_t vl) { return vluxei64_v_u32m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { +vuint64m1_t test_vluxei8_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint8mf8_t bindex, size_t vl) { return vluxei8_v_u64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { +vuint64m2_t test_vluxei8_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint8mf4_t bindex, size_t vl) { return vluxei8_v_u64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { +vuint64m4_t test_vluxei8_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint8mf2_t bindex, size_t vl) { return vluxei8_v_u64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei8_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vluxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { +vuint64m8_t test_vluxei8_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint8m1_t bindex, size_t vl) { return vluxei8_v_u64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { 
+vuint64m1_t test_vluxei16_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint16mf4_t bindex, size_t vl) { return vluxei16_v_u64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { +vuint64m2_t test_vluxei16_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint16mf2_t bindex, size_t vl) { return vluxei16_v_u64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { +vuint64m4_t test_vluxei16_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint16m1_t bindex, size_t vl) { return vluxei16_v_u64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei16_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vluxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { +vuint64m8_t test_vluxei16_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint16m2_t bindex, size_t vl) { return vluxei16_v_u64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { +vuint64m1_t test_vluxei32_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint32mf2_t bindex, size_t vl) { return vluxei32_v_u64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t 
vl) { +vuint64m2_t test_vluxei32_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint32m1_t bindex, size_t vl) { return vluxei32_v_u64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { +vuint64m4_t test_vluxei32_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint32m2_t bindex, size_t vl) { return vluxei32_v_u64m4_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei32_v_u64m8_m( +// CHECK-RV64-LABEL: @test_vluxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m8_t testuxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { +vuint64m8_t test_vluxei32_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint32m4_t bindex, size_t vl) { return vluxei32_v_u64m8_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m1_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m1_t testuxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { +vuint64m1_t test_vluxei64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, vuint64m1_t bindex, size_t vl) { return vluxei64_v_u64m1_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m2_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m2_t testuxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { +vuint64m2_t test_vluxei64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, vuint64m2_t bindex, size_t vl) { return vluxei64_v_u64m2_m(mask, maskedoff, base, bindex, vl); } -// -// CHECK-RV64-LABEL: @testuxei64_v_u64m4_m( +// CHECK-RV64-LABEL: @test_vluxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP1]] // -vuint64m4_t testuxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t 
+vuint64m4_t test_vluxei64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_u64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei64_v_u64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_u64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vuint64m8_t testuxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
+vuint64m8_t test_vluxei64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_u64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32mf2_t testuxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei8_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m1_t testuxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei8_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m2_t testuxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei8_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m4_t testuxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei8_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m8_t testuxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei8_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint8m2_t bindex, size_t vl) {
   return vluxei8_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32mf2_t testuxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei16_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m1_t testuxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei16_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m2_t testuxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei16_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m4_t testuxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei16_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m8_t testuxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei16_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint16m4_t bindex, size_t vl) {
   return vluxei16_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32mf2_t testuxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m1_t testuxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m2_t testuxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m4_t testuxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f32m8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f32m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m8_t testuxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
+vfloat32m8_t test_vluxei32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, vuint32m8_t bindex, size_t vl) {
   return vluxei32_v_f32m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f32mf2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32mf2_t testuxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
+vfloat32mf2_t test_vluxei64_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_f32mf2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f32m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m1_t testuxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
+vfloat32m1_t test_vluxei64_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_f32m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f32m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m2_t testuxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
+vfloat32m2_t test_vluxei64_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_f32m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f32m4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat32m4_t testuxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
+vfloat32m4_t test_vluxei64_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_f32m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m1_t testuxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei8_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint8mf8_t bindex, size_t vl) {
   return vluxei8_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m2_t testuxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei8_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint8mf4_t bindex, size_t vl) {
   return vluxei8_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m4_t testuxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei8_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint8mf2_t bindex, size_t vl) {
   return vluxei8_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei8_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei8_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m8_t testuxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei8_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint8m1_t bindex, size_t vl) {
   return vluxei8_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m1_t testuxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei16_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint16mf4_t bindex, size_t vl) {
   return vluxei16_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m2_t testuxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei16_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint16mf2_t bindex, size_t vl) {
   return vluxei16_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m4_t testuxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei16_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint16m1_t bindex, size_t vl) {
   return vluxei16_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei16_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei16_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m8_t testuxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei16_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint16m2_t bindex, size_t vl) {
   return vluxei16_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m1_t testuxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei32_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint32mf2_t bindex, size_t vl) {
   return vluxei32_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m2_t testuxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei32_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint32m1_t bindex, size_t vl) {
   return vluxei32_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m4_t testuxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei32_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint32m2_t bindex, size_t vl) {
   return vluxei32_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei32_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei32_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m8_t testuxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei32_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint32m4_t bindex, size_t vl) {
   return vluxei32_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f64m1_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m1_t testuxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
+vfloat64m1_t test_vluxei64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, vuint64m1_t bindex, size_t vl) {
   return vluxei64_v_f64m1_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f64m2_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m2_t testuxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
+vfloat64m2_t test_vluxei64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, vuint64m2_t bindex, size_t vl) {
   return vluxei64_v_f64m2_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f64m4_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m4_t testuxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
+vfloat64m4_t test_vluxei64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, vuint64m4_t bindex, size_t vl) {
   return vluxei64_v_f64m4_m(mask, maskedoff, base, bindex, vl);
 }
 
-//
-// CHECK-RV64-LABEL: @testuxei64_v_f64m8_m(
+// CHECK-RV64-LABEL: @test_vluxei64_v_f64m8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to *
 // CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret [[TMP1]]
 //
-vfloat64m8_t testuxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
+vfloat64m8_t test_vluxei64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, vuint64m8_t bindex, size_t vl) {
   return vluxei64_v_f64m8_m(mask, maskedoff, base, bindex, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vluxei8_v_f16mf4 (const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vluxei8_v_f16mf2 (const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vluxei8_v_f16m1 (const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vluxei8_v_f16m2 (const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m4_t test_vluxei8_v_f16m4 (const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vluxei8_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i8.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m8_t test_vluxei8_v_f16m8 (const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vluxei8_v_f16m8 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vluxei16_v_f16mf4 (const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vluxei16_v_f16mf2 (const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vluxei16_v_f16m1 (const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vluxei16_v_f16m2 (const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m4_t test_vluxei16_v_f16m4 (const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vluxei16_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv32f16.nxv32i16.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m8_t test_vluxei16_v_f16m8 (const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+  return vluxei16_v_f16m8 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vluxei32_v_f16mf4 (const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vluxei32_v_f16mf2 (const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vluxei32_v_f16m1 (const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vluxei32_v_f16m2 (const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv16f16.nxv16i32.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m4_t test_vluxei32_v_f16m4 (const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vluxei32_v_f16m4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv1f16.nxv1i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vluxei64_v_f16mf4 (const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64_v_f16mf4 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv2f16.nxv2i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vluxei64_v_f16mf2 (const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64_v_f16mf2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv4f16.nxv4i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vluxei64_v_f16m1 (const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64_v_f16m1 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.nxv8f16.nxv8i64.i64(* [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vluxei64_v_f16m2 (const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64_v_f16m2 (base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vluxei8_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint8mf8_t bindex, size_t vl) {
+  return vluxei8_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vluxei8_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint8mf4_t bindex, size_t vl) {
+  return vluxei8_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vluxei8_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint8mf2_t bindex, size_t vl) {
+  return vluxei8_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vluxei8_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint8m1_t bindex, size_t vl) {
+  return vluxei8_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m4_t test_vluxei8_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint8m2_t bindex, size_t vl) {
+  return vluxei8_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei8_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m8_t test_vluxei8_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint8m4_t bindex, size_t vl) {
+  return vluxei8_v_f16m8_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vluxei16_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint16mf4_t bindex, size_t vl) {
+  return vluxei16_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vluxei16_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint16mf2_t bindex, size_t vl) {
+  return vluxei16_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vluxei16_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint16m1_t bindex, size_t vl) {
+  return vluxei16_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vluxei16_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint16m2_t bindex, size_t vl) {
+  return vluxei16_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m4_t test_vluxei16_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint16m4_t bindex, size_t vl) {
+  return vluxei16_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei16_v_f16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m8_t test_vluxei16_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, const _Float16 *base, vuint16m8_t bindex, size_t vl) {
+  return vluxei16_v_f16m8_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vluxei32_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxei32_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vluxei32_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint32m1_t bindex, size_t vl) {
+  return vluxei32_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vluxei32_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint32m2_t bindex, size_t vl) {
+  return vluxei32_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vluxei32_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint32m4_t bindex, size_t vl) {
+  return vluxei32_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei32_v_f16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m4_t test_vluxei32_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, const _Float16 *base, vuint32m8_t bindex, size_t vl) {
+  return vluxei32_v_f16m4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf4_t test_vluxei64_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, const _Float16 *base, vuint64m1_t bindex, size_t vl) {
+  return vluxei64_v_f16mf4_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16mf2_t test_vluxei64_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, const _Float16 *base, vuint64m2_t bindex, size_t vl) {
+  return vluxei64_v_f16mf2_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m1_t test_vluxei64_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, const _Float16 *base, vuint64m4_t bindex, size_t vl) {
+  return vluxei64_v_f16m1_m (mask, maskedoff, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxei64_v_f16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to *
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP1]]
+//
+vfloat16m2_t test_vluxei64_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, const _Float16 *base, vuint64m8_t bindex, size_t vl) {
+  return vluxei64_v_f16m2_m (mask, maskedoff, base, bindex, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsoxei.c
@@ -1,11 +1,11 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -17,7 +17,6 @@
   return vsoxei8_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -29,7 +28,6 @@
   return vsoxei8_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -41,7 +39,6 @@
   return vsoxei8_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -53,7 +50,6 @@
   return vsoxei8_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -65,7 +61,6 @@
   return vsoxei8_v_i8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -77,7 +72,6 @@
   return vsoxei8_v_i8m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -89,7 +83,6 @@
   return vsoxei8_v_i8m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -101,7 +94,6 @@
   return vsoxei16_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -113,7 +105,6 @@
   return vsoxei16_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -125,7 +116,6 @@
   return vsoxei16_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -137,7 +127,6 @@
   return vsoxei16_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -149,7 +138,6 @@
   return vsoxei16_v_i8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -161,7 +149,6 @@
   return vsoxei16_v_i8m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -173,7 +160,6 @@
   return vsoxei32_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -185,7 +171,6 @@
   return vsoxei32_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -197,7 +182,6 @@
   return vsoxei32_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -209,7 +193,6 @@
   return vsoxei32_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -221,7 +204,6 @@
   return vsoxei32_v_i8m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -233,7 +215,6 @@
   return vsoxei64_v_i8mf8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -245,7 +226,6 @@
   return vsoxei64_v_i8mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -257,7 +237,6 @@
   return vsoxei64_v_i8mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -269,7 +248,6 @@
   return vsoxei64_v_i8m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -281,7 +259,6 @@
   return vsoxei8_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -293,7 +270,6 @@
   return vsoxei8_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -305,7 +281,6 @@
   return vsoxei8_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -317,7 +292,6 @@
   return vsoxei8_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -329,7 +303,6 @@
   return vsoxei8_v_i16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -341,7 +314,6 @@
   return vsoxei8_v_i16m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -353,7 +325,6 @@
   return vsoxei16_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -365,7 +336,6 @@
   return vsoxei16_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -377,7 +347,6 @@
   return vsoxei16_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -389,7 +358,6 @@
   return vsoxei16_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -401,7 +369,6 @@
   return vsoxei16_v_i16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -413,7 +380,6 @@
   return vsoxei16_v_i16m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -425,7 +391,6 @@
   return vsoxei32_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -437,7 +402,6 @@
   return vsoxei32_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -449,7 +413,6 @@
   return vsoxei32_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -461,7 +424,6 @@
   return vsoxei32_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -473,7 +435,6 @@
   return vsoxei32_v_i16m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -485,7 +446,6 @@
   return vsoxei64_v_i16mf4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -497,7 +457,6 @@
   return vsoxei64_v_i16mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -509,7 +468,6 @@
   return vsoxei64_v_i16m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to *
@@ -521,7 +479,6 @@
   return vsoxei64_v_i16m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -533,7 +490,6 @@
   return vsoxei8_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -545,7 +501,6 @@
   return vsoxei8_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -557,7 +512,6 @@
   return vsoxei8_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -569,7 +523,6 @@
   return vsoxei8_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -581,7 +534,6 @@
   return vsoxei8_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -593,7 +545,6 @@
   return vsoxei16_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -605,7 +556,6 @@
   return vsoxei16_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -617,7 +567,6 @@
   return vsoxei16_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -629,7 +578,6 @@
   return vsoxei16_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -641,7 +589,6 @@
   return vsoxei16_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -653,7 +600,6 @@
   return vsoxei32_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -665,7 +611,6 @@
   return vsoxei32_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -677,7 +622,6 @@
   return vsoxei32_v_i32m2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -689,7 +633,6 @@
   return vsoxei32_v_i32m4(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -701,7 +644,6 @@
   return vsoxei32_v_i32m8(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -713,7 +655,6 @@
   return vsoxei64_v_i32mf2(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -725,7 +666,6 @@
   return vsoxei64_v_i32m1(base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -737,7 +677,6 @@
   return vsoxei64_v_i32m2(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to *
@@ -749,7 +688,6 @@
   return vsoxei64_v_i32m4(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -761,7 +699,6 @@
   return vsoxei8_v_i64m1(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -773,7 +710,6 @@
   return vsoxei8_v_i64m2(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -785,7 +721,6 @@
   return vsoxei8_v_i64m4(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -797,7 +732,6 @@
   return vsoxei8_v_i64m8(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -809,7 +743,6 @@
   return vsoxei16_v_i64m1(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -821,7 +754,6 @@
   return vsoxei16_v_i64m2(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -833,7 +765,6 @@
   return vsoxei16_v_i64m4(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -845,7 +776,6 @@
   return vsoxei16_v_i64m8(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -857,7 +787,6 @@
   return vsoxei32_v_i64m1(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -869,7 +798,6 @@
   return vsoxei32_v_i64m2(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -881,7 +809,6 @@
   return vsoxei32_v_i64m4(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -893,7 +820,6 @@
   return vsoxei32_v_i64m8(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -905,7 +831,6 @@
   return vsoxei64_v_i64m1(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -917,7 +842,6 @@
   return vsoxei64_v_i64m2(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -929,7 +853,6 @@
   return vsoxei64_v_i64m4(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to *
@@ -941,7 +864,6 @@
   return vsoxei64_v_i64m8(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -953,7 +875,6 @@
   return vsoxei8_v_u8mf8(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -965,7 +886,6 @@
   return vsoxei8_v_u8mf4(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -977,7 +897,6 @@
   return vsoxei8_v_u8mf2(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -989,7 +908,6 @@
   return vsoxei8_v_u8m1(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1001,7 +919,6 @@
   return vsoxei8_v_u8m2(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1013,7 +930,6 @@
   return vsoxei8_v_u8m4(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1025,7 +941,6 @@
   return vsoxei8_v_u8m8(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1037,7 +952,6 @@
   return vsoxei16_v_u8mf8(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1049,7 +963,6 @@
   return vsoxei16_v_u8mf4(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1061,7 +974,6 @@
   return vsoxei16_v_u8mf2(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1073,7 +985,6 @@
   return vsoxei16_v_u8m1(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1085,7 +996,6 @@
   return vsoxei16_v_u8m2(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1097,7 +1007,6 @@
   return vsoxei16_v_u8m4(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1109,7 +1018,6 @@
   return vsoxei32_v_u8mf8(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1121,7 +1029,6 @@
   return vsoxei32_v_u8mf4(base, bindex, value, vl);
 }

-//
 // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to *
@@ -1133,7 +1040,6 @@
return vsoxei32_v_u8mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1145,7 +1051,6 @@ return vsoxei32_v_u8m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1157,7 +1062,6 @@ return vsoxei32_v_u8m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1169,7 +1073,6 @@ return vsoxei64_v_u8mf8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1181,7 +1084,6 @@ return vsoxei64_v_u8mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1193,7 +1095,6 @@ return vsoxei64_v_u8mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1205,7 +1106,6 @@ return vsoxei64_v_u8m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1217,7 +1117,6 @@ return vsoxei8_v_u16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1229,7 +1128,6 @@ return vsoxei8_v_u16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1241,7 +1139,6 @@ return vsoxei8_v_u16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1253,7 +1150,6 @@ return vsoxei8_v_u16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1265,7 +1161,6 @@ return vsoxei8_v_u16m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1277,7 +1172,6 @@ return vsoxei8_v_u16m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1289,7 +1183,6 @@ return vsoxei16_v_u16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1301,7 +1194,6 @@ return vsoxei16_v_u16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1313,7 +1205,6 @@ return vsoxei16_v_u16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1325,7 +1216,6 @@ return vsoxei16_v_u16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1337,7 +1227,6 @@ return vsoxei16_v_u16m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1349,7 +1238,6 @@ return vsoxei16_v_u16m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1361,7 +1249,6 @@ return vsoxei32_v_u16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1373,7 +1260,6 @@ return vsoxei32_v_u16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1385,7 +1271,6 @@ return vsoxei32_v_u16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1397,7 +1282,6 @@ return vsoxei32_v_u16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1409,7 +1293,6 @@ return vsoxei32_v_u16m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1421,7 +1304,6 @@ return vsoxei64_v_u16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1433,7 +1315,6 @@ return vsoxei64_v_u16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1445,7 +1326,6 @@ return vsoxei64_v_u16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1457,7 +1337,6 @@ return vsoxei64_v_u16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1469,7 +1348,6 @@ return vsoxei8_v_u32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1481,7 +1359,6 @@ return vsoxei8_v_u32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1493,7 +1370,6 @@ return vsoxei8_v_u32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1505,7 +1381,6 @@ return vsoxei8_v_u32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1517,7 +1392,6 @@ return vsoxei8_v_u32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ 
-1529,7 +1403,6 @@ return vsoxei16_v_u32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1541,7 +1414,6 @@ return vsoxei16_v_u32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1553,7 +1425,6 @@ return vsoxei16_v_u32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1565,7 +1436,6 @@ return vsoxei16_v_u32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1577,7 +1447,6 @@ return vsoxei16_v_u32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1589,7 +1458,6 @@ return vsoxei32_v_u32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1601,7 +1469,6 @@ return vsoxei32_v_u32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1613,7 +1480,6 @@ return vsoxei32_v_u32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1625,7 +1491,6 @@ return vsoxei32_v_u32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1637,7 +1502,6 @@ return vsoxei32_v_u32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1649,7 +1513,6 @@ return vsoxei64_v_u32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1661,7 +1524,6 @@ return vsoxei64_v_u32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1673,7 +1535,6 @@ return vsoxei64_v_u32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1685,7 +1546,6 @@ return vsoxei64_v_u32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1697,7 +1557,6 @@ return vsoxei8_v_u64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1709,7 +1568,6 @@ return vsoxei8_v_u64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1721,7 +1579,6 @@ return vsoxei8_v_u64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: 
@test_vsoxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1733,7 +1590,6 @@ return vsoxei8_v_u64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1745,7 +1601,6 @@ return vsoxei16_v_u64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1757,7 +1612,6 @@ return vsoxei16_v_u64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1769,7 +1623,6 @@ return vsoxei16_v_u64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1781,7 +1634,6 @@ return vsoxei16_v_u64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1793,7 +1645,6 @@ return vsoxei32_v_u64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1805,7 +1656,6 @@ return vsoxei32_v_u64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1817,7 +1667,6 @@ return vsoxei32_v_u64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1829,7 +1678,6 @@ return vsoxei32_v_u64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1841,7 +1689,6 @@ return vsoxei64_v_u64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1853,7 +1700,6 @@ return vsoxei64_v_u64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1865,7 +1711,6 @@ return vsoxei64_v_u64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1877,7 +1722,6 @@ return vsoxei64_v_u64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1889,7 +1733,6 @@ return vsoxei8_v_f32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1901,7 +1744,6 @@ return vsoxei8_v_f32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1913,7 +1755,6 @@ return vsoxei8_v_f32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
float* [[BASE:%.*]] to * @@ -1925,7 +1766,6 @@ return vsoxei8_v_f32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1937,7 +1777,6 @@ return vsoxei8_v_f32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1949,7 +1788,6 @@ return vsoxei16_v_f32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1961,7 +1799,6 @@ return vsoxei16_v_f32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1973,7 +1810,6 @@ return vsoxei16_v_f32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1985,7 +1821,6 @@ return vsoxei16_v_f32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1997,7 +1832,6 @@ return vsoxei16_v_f32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2009,7 +1843,6 @@ return vsoxei32_v_f32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2021,7 +1854,6 @@ return vsoxei32_v_f32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2033,7 +1865,6 @@ return vsoxei32_v_f32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2045,7 +1876,6 @@ return vsoxei32_v_f32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2057,7 +1887,6 @@ return vsoxei32_v_f32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2069,7 +1898,6 @@ return vsoxei64_v_f32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2081,7 +1909,6 @@ return vsoxei64_v_f32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2093,7 +1920,6 @@ return vsoxei64_v_f32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2105,7 +1931,6 @@ return vsoxei64_v_f32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2117,7 +1942,6 @@ return 
vsoxei8_v_f64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2129,7 +1953,6 @@ return vsoxei8_v_f64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2141,7 +1964,6 @@ return vsoxei8_v_f64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2153,7 +1975,6 @@ return vsoxei8_v_f64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2165,7 +1986,6 @@ return vsoxei16_v_f64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2177,7 +1997,6 @@ return vsoxei16_v_f64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2189,7 +2008,6 @@ return vsoxei16_v_f64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2201,7 +2019,6 @@ return vsoxei16_v_f64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2213,7 +2030,6 @@ return vsoxei32_v_f64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2225,7 +2041,6 @@ return vsoxei32_v_f64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2237,7 +2052,6 @@ return vsoxei32_v_f64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2249,7 +2063,6 @@ return vsoxei32_v_f64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2261,7 +2074,6 @@ return vsoxei64_v_f64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2273,7 +2085,6 @@ return vsoxei64_v_f64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2285,7 +2096,6 @@ return vsoxei64_v_f64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2297,7 +2107,6 @@ return vsoxei64_v_f64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2309,7 +2118,6 @@ return vsoxei8_v_i8mf8_m(mask, base, bindex, value, vl); } -// // 
CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2321,7 +2129,6 @@ return vsoxei8_v_i8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2333,7 +2140,6 @@ return vsoxei8_v_i8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2345,7 +2151,6 @@ return vsoxei8_v_i8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2357,7 +2162,6 @@ return vsoxei8_v_i8m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2369,7 +2173,6 @@ return vsoxei8_v_i8m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2381,7 +2184,6 @@ return vsoxei8_v_i8m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2393,7 +2195,6 @@ return vsoxei16_v_i8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2405,7 +2206,6 @@ return vsoxei16_v_i8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2417,7 +2217,6 @@ return vsoxei16_v_i8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2429,7 +2228,6 @@ return vsoxei16_v_i8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2441,7 +2239,6 @@ return vsoxei16_v_i8m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2453,7 +2250,6 @@ return vsoxei16_v_i8m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2465,7 +2261,6 @@ return vsoxei32_v_i8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2477,7 +2272,6 @@ return vsoxei32_v_i8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2489,7 +2283,6 @@ return vsoxei32_v_i8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2501,7 +2294,6 @@ return vsoxei32_v_i8m1_m(mask, base, bindex, 
value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2513,7 +2305,6 @@ return vsoxei32_v_i8m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2525,7 +2316,6 @@ return vsoxei64_v_i8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2537,7 +2327,6 @@ return vsoxei64_v_i8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2549,7 +2338,6 @@ return vsoxei64_v_i8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2561,7 +2349,6 @@ return vsoxei64_v_i8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2573,7 +2360,6 @@ return vsoxei8_v_i16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2585,7 +2371,6 @@ return vsoxei8_v_i16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2597,7 +2382,6 @@ return vsoxei8_v_i16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2609,7 +2393,6 @@ return vsoxei8_v_i16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2621,7 +2404,6 @@ return vsoxei8_v_i16m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2633,7 +2415,6 @@ return vsoxei8_v_i16m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2646,7 +2427,6 @@ return vsoxei16_v_i16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2659,7 +2439,6 @@ return vsoxei16_v_i16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2671,7 +2450,6 @@ return vsoxei16_v_i16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2683,7 +2461,6 @@ return vsoxei16_v_i16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2695,7 +2472,6 
@@ return vsoxei16_v_i16m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2707,7 +2483,6 @@ return vsoxei16_v_i16m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2720,7 +2495,6 @@ return vsoxei32_v_i16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2732,7 +2506,6 @@ return vsoxei32_v_i16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2744,7 +2517,6 @@ return vsoxei32_v_i16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2756,7 +2528,6 @@ return vsoxei32_v_i16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2768,7 +2539,6 @@ return vsoxei32_v_i16m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2780,7 +2550,6 @@ return vsoxei64_v_i16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2792,7 +2561,6 @@ return vsoxei64_v_i16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2804,7 +2572,6 @@ return vsoxei64_v_i16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2816,7 +2583,6 @@ return vsoxei64_v_i16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2828,7 +2594,6 @@ return vsoxei8_v_i32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2840,7 +2605,6 @@ return vsoxei8_v_i32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2852,7 +2616,6 @@ return vsoxei8_v_i32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2864,7 +2627,6 @@ return vsoxei8_v_i32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2876,7 +2638,6 @@ return vsoxei8_v_i32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2889,7 +2650,6 @@ return vsoxei16_v_i32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2901,7 +2661,6 @@ return vsoxei16_v_i32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2913,7 +2672,6 @@ return vsoxei16_v_i32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2925,7 +2683,6 @@ return vsoxei16_v_i32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2937,7 +2694,6 @@ return vsoxei16_v_i32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2950,7 +2706,6 @@ return vsoxei32_v_i32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2962,7 +2717,6 @@ return vsoxei32_v_i32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2974,7 +2728,6 @@ return vsoxei32_v_i32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2986,7 +2739,6 @@ return vsoxei32_v_i32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2998,7 +2750,6 @@ return vsoxei32_v_i32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3010,7 +2761,6 @@ return vsoxei64_v_i32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3022,7 +2772,6 @@ return vsoxei64_v_i32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3034,7 +2783,6 @@ return vsoxei64_v_i32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3046,7 +2794,6 @@ return vsoxei64_v_i32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3058,7 +2805,6 @@ return vsoxei8_v_i64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3070,7 +2816,6 @@ return vsoxei8_v_i64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: 
@test_vsoxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3082,7 +2827,6 @@ return vsoxei8_v_i64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3094,7 +2838,6 @@ return vsoxei8_v_i64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3106,7 +2849,6 @@ return vsoxei16_v_i64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3118,7 +2860,6 @@ return vsoxei16_v_i64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3130,7 +2871,6 @@ return vsoxei16_v_i64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3142,7 +2882,6 @@ return vsoxei16_v_i64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3154,7 +2893,6 @@ return vsoxei32_v_i64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3166,7 +2904,6 @@ return vsoxei32_v_i64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3178,7 +2915,6 @@ return vsoxei32_v_i64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3190,7 +2926,6 @@ return vsoxei32_v_i64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3202,7 +2937,6 @@ return vsoxei64_v_i64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3214,7 +2948,6 @@ return vsoxei64_v_i64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3226,7 +2959,6 @@ return vsoxei64_v_i64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3238,7 +2970,6 @@ return vsoxei64_v_i64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3250,7 +2981,6 @@ return vsoxei8_v_u8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3262,7 +2992,6 @@ return vsoxei8_v_u8mf4_m(mask, 
base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3274,7 +3003,6 @@ return vsoxei8_v_u8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3286,7 +3014,6 @@ return vsoxei8_v_u8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3298,7 +3025,6 @@ return vsoxei8_v_u8m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3310,7 +3036,6 @@ return vsoxei8_v_u8m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3322,7 +3047,6 @@ return vsoxei8_v_u8m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3334,7 +3058,6 @@ return vsoxei16_v_u8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3346,7 +3069,6 @@ return vsoxei16_v_u8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3358,7 +3080,6 @@ return vsoxei16_v_u8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3370,7 +3091,6 @@ return vsoxei16_v_u8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3382,7 +3102,6 @@ return vsoxei16_v_u8m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3394,7 +3113,6 @@ return vsoxei16_v_u8m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3406,7 +3124,6 @@ return vsoxei32_v_u8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3418,7 +3135,6 @@ return vsoxei32_v_u8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3430,7 +3146,6 @@ return vsoxei32_v_u8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3442,7 +3157,6 @@ return vsoxei32_v_u8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3454,7 +3168,6 @@ return 
vsoxei32_v_u8m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3466,7 +3179,6 @@ return vsoxei64_v_u8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3478,7 +3190,6 @@ return vsoxei64_v_u8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3490,7 +3201,6 @@ return vsoxei64_v_u8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -3502,7 +3212,6 @@ return vsoxei64_v_u8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3514,7 +3223,6 @@ return vsoxei8_v_u16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3526,7 +3234,6 @@ return vsoxei8_v_u16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3538,7 +3245,6 @@ return vsoxei8_v_u16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3550,7 +3256,6 @@ return vsoxei8_v_u16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3562,7 +3267,6 @@ return vsoxei8_v_u16m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3574,7 +3278,6 @@ return vsoxei8_v_u16m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3587,7 +3290,6 @@ return vsoxei16_v_u16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3600,7 +3302,6 @@ return vsoxei16_v_u16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3612,7 +3313,6 @@ return vsoxei16_v_u16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3624,7 +3324,6 @@ return vsoxei16_v_u16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3636,7 +3335,6 @@ return vsoxei16_v_u16m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
i16* [[BASE:%.*]] to * @@ -3648,7 +3346,6 @@ return vsoxei16_v_u16m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3661,7 +3358,6 @@ return vsoxei32_v_u16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3674,7 +3370,6 @@ return vsoxei32_v_u16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3686,7 +3381,6 @@ return vsoxei32_v_u16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3698,7 +3392,6 @@ return vsoxei32_v_u16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3710,7 +3403,6 @@ return vsoxei32_v_u16m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3723,7 +3415,6 @@ return vsoxei64_v_u16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3736,7 +3427,6 @@ return vsoxei64_v_u16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3748,7 +3438,6 @@ return vsoxei64_v_u16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -3760,7 +3449,6 @@ return vsoxei64_v_u16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3772,7 +3460,6 @@ return vsoxei8_v_u32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3784,7 +3471,6 @@ return vsoxei8_v_u32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3796,7 +3482,6 @@ return vsoxei8_v_u32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3808,7 +3493,6 @@ return vsoxei8_v_u32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3820,7 +3504,6 @@ return vsoxei8_v_u32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3833,7 +3516,6 @@ return vsoxei16_v_u32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m1_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3846,7 +3528,6 @@ return vsoxei16_v_u32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3858,7 +3539,6 @@ return vsoxei16_v_u32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3870,7 +3550,6 @@ return vsoxei16_v_u32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3882,7 +3561,6 @@ return vsoxei16_v_u32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3895,7 +3573,6 @@ return vsoxei32_v_u32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3907,7 +3584,6 @@ return vsoxei32_v_u32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3919,7 +3595,6 @@ return vsoxei32_v_u32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3931,7 +3606,6 @@ return vsoxei32_v_u32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3943,7 +3617,6 @@ return vsoxei32_v_u32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3956,7 +3629,6 @@ return vsoxei64_v_u32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3968,7 +3640,6 @@ return vsoxei64_v_u32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3980,7 +3651,6 @@ return vsoxei64_v_u32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3992,7 +3662,6 @@ return vsoxei64_v_u32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4004,7 +3673,6 @@ return vsoxei8_v_u64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4016,7 +3684,6 @@ return vsoxei8_v_u64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4028,7 +3695,6 @@ return vsoxei8_v_u64m4_m(mask, base, bindex, value, 
vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4040,7 +3706,6 @@ return vsoxei8_v_u64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4053,7 +3718,6 @@ return vsoxei16_v_u64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4066,7 +3730,6 @@ return vsoxei16_v_u64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4078,7 +3741,6 @@ return vsoxei16_v_u64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4090,7 +3752,6 @@ return vsoxei16_v_u64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4103,7 +3764,6 @@ return vsoxei32_v_u64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4115,7 +3775,6 @@ return vsoxei32_v_u64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4127,7 +3786,6 @@ return vsoxei32_v_u64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4139,7 +3797,6 @@ return vsoxei32_v_u64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4151,7 +3808,6 @@ return vsoxei64_v_u64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4163,7 +3819,6 @@ return vsoxei64_v_u64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4175,7 +3830,6 @@ return vsoxei64_v_u64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -4187,7 +3841,6 @@ return vsoxei64_v_u64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4199,7 +3852,6 @@ return vsoxei8_v_f32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4211,7 +3863,6 @@ return vsoxei8_v_f32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4223,7 
+3874,6 @@ return vsoxei8_v_f32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4235,7 +3885,6 @@ return vsoxei8_v_f32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4247,7 +3896,6 @@ return vsoxei8_v_f32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4259,7 +3907,6 @@ return vsoxei16_v_f32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4271,7 +3918,6 @@ return vsoxei16_v_f32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4283,7 +3929,6 @@ return vsoxei16_v_f32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4295,7 +3940,6 @@ return vsoxei16_v_f32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4307,7 +3951,6 @@ return vsoxei16_v_f32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4319,7 +3962,6 @@ return vsoxei32_v_f32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4331,7 +3973,6 @@ return vsoxei32_v_f32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4343,7 +3984,6 @@ return vsoxei32_v_f32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4355,7 +3995,6 @@ return vsoxei32_v_f32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4367,7 +4006,6 @@ return vsoxei32_v_f32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4379,7 +4017,6 @@ return vsoxei64_v_f32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4391,7 +4028,6 @@ return vsoxei64_v_f32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4403,7 +4039,6 @@ return vsoxei64_v_f32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f32m4_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -4415,7 +4050,6 @@ return vsoxei64_v_f32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4427,7 +4061,6 @@ return vsoxei8_v_f64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4439,7 +4072,6 @@ return vsoxei8_v_f64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4451,7 +4083,6 @@ return vsoxei8_v_f64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei8_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4463,7 +4094,6 @@ return vsoxei8_v_f64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4475,7 +4105,6 @@ return vsoxei16_v_f64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4487,7 +4116,6 @@ return vsoxei16_v_f64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4499,7 +4127,6 @@ return vsoxei16_v_f64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei16_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4511,7 +4138,6 @@ return vsoxei16_v_f64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4523,7 +4149,6 @@ return vsoxei32_v_f64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4535,7 +4160,6 @@ return vsoxei32_v_f64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4547,7 +4171,6 @@ return vsoxei32_v_f64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei32_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4559,7 +4182,6 @@ return vsoxei32_v_f64m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4571,7 +4193,6 @@ return vsoxei64_v_f64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4583,7 +4204,6 @@ return vsoxei64_v_f64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4595,7 +4215,6 @@ return 
vsoxei64_v_f64m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsoxei64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -4606,3 +4225,423 @@ vfloat64m8_t value, size_t vl) { return vsoxei64_v_f64m8_m(mask, base, bindex, value, vl); } + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei8_v_f16mf4 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei8_v_f16mf2 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei8_v_f16m1 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei8_v_f16m2 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei8_v_f16m4 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m8 (_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei8_v_f16m8 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei16_v_f16mf4 (base, bindex, value, vl); +} + +// 
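The unmasked vsoxei*_v_f16* additions above fix the C-level contract: the vfloat16 value type sets the data LMUL, and the index element width (8, 16, 32, or 64 bits) selects the matching vuint index type. As a rough illustration of how a caller would use these signatures (the helper name and its parameters are hypothetical; only the intrinsic calls come from the test file):

#include <riscv_vector.h>
#include <stddef.h>

/* Illustrative only: ordered scatter of one strip-mined chunk of
 * _Float16 data. As in the tests above, f16m1 data pairs with a
 * vuint8mf2_t byte-offset vector for the 8-bit-indexed form. */
void scatter_f16(_Float16 *base, const _Float16 *src,
                 vuint8mf2_t byte_offsets, size_t n) {
  size_t vl = vsetvl_e16m1(n);                /* elements this pass */
  vfloat16m1_t v = vle16_v_f16m1(src, vl);    /* values to scatter */
  vsoxei8_v_f16m1(base, byte_offsets, v, vl); /* ordered indexed store */
}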
CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei16_v_f16mf2 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei16_v_f16m1 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei16_v_f16m2 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei16_v_f16m4 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m8 (_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei16_v_f16m8 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei32_v_f16mf4 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei32_v_f16mf2 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// 
+void test_vsoxei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei32_v_f16m1 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei32_v_f16m2 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei32_v_f16m4 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei64_v_f16mf4 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei64_v_f16mf2 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei64_v_f16m1 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei64_v_f16m2 (base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei8_v_f16mf4_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei8_v_f16mf2_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei8_v_f16m1_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei8_v_f16m2_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei8_v_f16m4_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei8_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei8_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei8_v_f16m8_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei16_v_f16mf4_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei16_v_f16mf2_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei16_v_f16m1_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei16_v_f16m2_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei16_v_f16m4_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei16_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) { + return vsoxei16_v_f16m8_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei32_v_f16mf4_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei32_v_f16mf2_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei32_v_f16m1_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m2_m( +// CHECK-RV64-NEXT: entry: 
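The _m variants thread a mask whose vbool ratio matches the data type (vbool16_t for f16m1, since SEW/LMUL = 16/1); masked-off destination elements are simply not written. A minimal sketch under the signatures above, with the mask supplied by the caller and the helper name hypothetical:

#include <riscv_vector.h>
#include <stddef.h>

/* Hypothetical conditional scatter: only lanes whose mask bit is set
 * are stored; all other destinations are left untouched. */
void scatter_f16_masked(vbool16_t mask, _Float16 *base,
                        vuint8mf2_t byte_offsets, vfloat16m1_t v,
                        size_t vl) {
  vsoxei8_v_f16m1_m(mask, base, byte_offsets, v, vl);
}

Note that coverage stops at vsoxei32_v_f16m4 and vsoxei64_v_f16m2, masked and unmasked alike: the index operand's register group grows as EMUL = (index EEW / 16) * LMUL, and one step further in either series would need EMUL = 16, past the architectural maximum of 8 (there is no vuint32m16_t or vuint64m16_t type to name it with).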
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei32_v_f16m2_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei32_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) { + return vsoxei32_v_f16m4_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) { + return vsoxei64_v_f16mf4_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) { + return vsoxei64_v_f16mf2_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) { + return vsoxei64_v_f16m1_m (mask, base, bindex, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsoxei64_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], [[BINDEX:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsoxei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) { + return vsoxei64_v_f16m2_m (mask, base, bindex, value, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsse.c @@ -1,11 +1,11 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // REQUIRES: riscv-registered-target -// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature 
+experimental-v -target-feature +experimental-zfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s #include -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -17,7 +17,6 @@ return vsse8_v_i8mf8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -29,7 +28,6 @@ return vsse8_v_i8mf4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -41,7 +39,6 @@ return vsse8_v_i8mf2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -53,7 +50,6 @@ return vsse8_v_i8m1(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -65,7 +61,6 @@ return vsse8_v_i8m2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -77,7 +72,6 @@ return vsse8_v_i8m4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -89,7 +83,6 @@ return vsse8_v_i8m8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -101,7 +94,6 @@ return vsse16_v_i16mf4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -113,7 +105,6 @@ return vsse16_v_i16mf2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -125,7 +116,6 @@ return vsse16_v_i16m1(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -137,7 +127,6 @@ return vsse16_v_i16m2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -149,7 +138,6 @@ return vsse16_v_i16m4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -161,7 +149,6 @@ return vsse16_v_i16m8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -173,7 +160,6 @@ return vsse32_v_i32mf2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -185,7 +171,6 @@ return vsse32_v_i32m1(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -197,7 +182,6 @@ return vsse32_v_i32m2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m4( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -209,7 +193,6 @@ return vsse32_v_i32m4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -221,7 +204,6 @@ return vsse32_v_i32m8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -233,7 +215,6 @@ return vsse64_v_i64m1(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -245,7 +226,6 @@ return vsse64_v_i64m2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -257,7 +237,6 @@ return vsse64_v_i64m4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -269,7 +248,6 @@ return vsse64_v_i64m8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -281,7 +259,6 @@ return vsse8_v_u8mf8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -293,7 +270,6 @@ return vsse8_v_u8mf4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -305,7 +281,6 @@ return vsse8_v_u8mf2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -317,7 +292,6 @@ return vsse8_v_u8m1(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -329,7 +303,6 @@ return vsse8_v_u8m2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -341,7 +314,6 @@ return vsse8_v_u8m4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -353,7 +325,6 @@ return vsse8_v_u8m8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -365,7 +336,6 @@ return vsse16_v_u16mf4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -377,7 +347,6 @@ return vsse16_v_u16mf2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -389,7 +358,6 @@ return vsse16_v_u16m1(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -401,7 +369,6 @@ return vsse16_v_u16m2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: 
@test_vsse16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -413,7 +380,6 @@ return vsse16_v_u16m4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -425,7 +391,6 @@ return vsse16_v_u16m8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -437,7 +402,6 @@ return vsse32_v_u32mf2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -449,7 +413,6 @@ return vsse32_v_u32m1(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -461,7 +424,6 @@ return vsse32_v_u32m2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -473,7 +435,6 @@ return vsse32_v_u32m4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -485,7 +446,6 @@ return vsse32_v_u32m8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -497,7 +457,6 @@ return vsse64_v_u64m1(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -509,7 +468,6 @@ return vsse64_v_u64m2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -521,7 +479,6 @@ return vsse64_v_u64m4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -533,7 +490,6 @@ return vsse64_v_u64m8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -545,7 +501,6 @@ return vsse32_v_f32mf2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -557,7 +512,6 @@ return vsse32_v_f32m1(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -569,7 +523,6 @@ return vsse32_v_f32m2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -581,7 +534,6 @@ return vsse32_v_f32m4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -593,7 +545,6 @@ return vsse32_v_f32m8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -605,7 +556,6 @@ return 
vsse64_v_f64m1(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -617,7 +567,6 @@ return vsse64_v_f64m2(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -629,7 +578,6 @@ return vsse64_v_f64m4(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -641,7 +589,6 @@ return vsse64_v_f64m8(base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -653,7 +600,6 @@ return vsse8_v_i8mf8_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -665,7 +611,6 @@ return vsse8_v_i8mf4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -677,7 +622,6 @@ return vsse8_v_i8mf2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -689,7 +633,6 @@ return vsse8_v_i8m1_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -701,7 +644,6 @@ return vsse8_v_i8m2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -713,7 +655,6 @@ return vsse8_v_i8m4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -725,7 +666,6 @@ return vsse8_v_i8m8_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -737,7 +677,6 @@ return vsse16_v_i16mf4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -749,7 +688,6 @@ return vsse16_v_i16mf2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -761,7 +699,6 @@ return vsse16_v_i16m1_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -773,7 +710,6 @@ return vsse16_v_i16m2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -785,7 +721,6 @@ return vsse16_v_i16m4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -797,7 +732,6 @@ return vsse16_v_i16m8_m(mask, base, bstride, value, vl); } -// // 
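Unlike the indexed stores, vsse* takes a signed byte stride (ptrdiff_t bstride) instead of an offset vector: a stride of sizeof(element) stores contiguously, and larger strides interleave. A hedged example using a signature from this file (the helper is hypothetical):

#include <riscv_vector.h>
#include <stddef.h>

/* Hypothetical: write vl floats into every second slot of dst, i.e.
 * dst[0], dst[2], dst[4], ... The stride is in bytes, so adjacent
 * stores land 2 * sizeof(float) = 8 bytes apart. */
void store_every_other_f32(float *dst, vfloat32m1_t v, size_t vl) {
  vsse32_v_f32m1(dst, 2 * (ptrdiff_t)sizeof(float), v, vl);
}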
CHECK-RV64-LABEL: @test_vsse32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -809,7 +743,6 @@ return vsse32_v_i32mf2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -821,7 +754,6 @@ return vsse32_v_i32m1_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -833,7 +765,6 @@ return vsse32_v_i32m2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -845,7 +776,6 @@ return vsse32_v_i32m4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -857,7 +787,6 @@ return vsse32_v_i32m8_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -869,7 +798,6 @@ return vsse64_v_i64m1_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -881,7 +809,6 @@ return vsse64_v_i64m2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -893,7 +820,6 @@ return vsse64_v_i64m4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -905,7 +831,6 @@ return vsse64_v_i64m8_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -917,7 +842,6 @@ return vsse8_v_u8mf8_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -929,7 +853,6 @@ return vsse8_v_u8mf4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -941,7 +864,6 @@ return vsse8_v_u8mf2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -953,7 +875,6 @@ return vsse8_v_u8m1_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -965,7 +886,6 @@ return vsse8_v_u8m2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -977,7 +897,6 @@ return vsse8_v_u8m4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse8_v_u8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -989,7 +908,6 @@ return vsse8_v_u8m8_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16mf4_m( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1001,7 +919,6 @@ return vsse16_v_u16mf4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1013,7 +930,6 @@ return vsse16_v_u16mf2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1025,7 +941,6 @@ return vsse16_v_u16m1_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1037,7 +952,6 @@ return vsse16_v_u16m2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1049,7 +963,6 @@ return vsse16_v_u16m4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse16_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1061,7 +974,6 @@ return vsse16_v_u16m8_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1073,7 +985,6 @@ return vsse32_v_u32mf2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1085,7 +996,6 @@ return vsse32_v_u32m1_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1097,7 +1007,6 @@ return vsse32_v_u32m2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1109,7 +1018,6 @@ return vsse32_v_u32m4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1121,7 +1029,6 @@ return vsse32_v_u32m8_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1133,7 +1040,6 @@ return vsse64_v_u64m1_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1145,7 +1051,6 @@ return vsse64_v_u64m2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1157,7 +1062,6 @@ return vsse64_v_u64m4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1169,7 +1073,6 @@ return vsse64_v_u64m8_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1181,7 +1084,6 @@ return vsse32_v_f32mf2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: 
@test_vsse32_v_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1193,7 +1095,6 @@ return vsse32_v_f32m1_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1205,7 +1106,6 @@ return vsse32_v_f32m2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1217,7 +1117,6 @@ return vsse32_v_f32m4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse32_v_f32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1229,7 +1128,6 @@ return vsse32_v_f32m8_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1241,7 +1139,6 @@ return vsse64_v_f64m1_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1253,7 +1150,6 @@ return vsse64_v_f64m2_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1265,7 +1161,6 @@ return vsse64_v_f64m4_m(mask, base, bstride, value, vl); } -// // CHECK-RV64-LABEL: @test_vsse64_v_f64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -1276,3 +1171,123 @@ vfloat64m8_t value, size_t vl) { return vsse64_v_f64m8_m(mask, base, bstride, value, vl); } + +// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16mf4 (_Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { + return vsse16_v_f16mf4 (base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16mf2 (_Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { + return vsse16_v_f16mf2 (base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16m1 (_Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { + return vsse16_v_f16m1 (base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16m2 (_Float16 *base, ptrdiff_t bstride, vfloat16m2_t 
value, size_t vl) { + return vsse16_v_f16m2 (base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16m4 (_Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { + return vsse16_v_f16m4 (base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.nxv32f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16m8 (_Float16 *base, ptrdiff_t bstride, vfloat16m8_t value, size_t vl) { + return vsse16_v_f16m8 (base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv1f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16mf4_m (vbool64_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf4_t value, size_t vl) { + return vsse16_v_f16mf4_m (mask, base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv2f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16mf2_m (vbool32_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16mf2_t value, size_t vl) { + return vsse16_v_f16mf2_m (mask, base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv4f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16m1_m (vbool16_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m1_t value, size_t vl) { + return vsse16_v_f16m1_m (mask, base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv8f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16m2_m (vbool8_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m2_t value, size_t vl) { + return vsse16_v_f16m2_m (mask, base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vsse.mask.nxv16f16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[BSTRIDE:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vsse16_v_f16m4_m (vbool4_t mask, _Float16 *base, ptrdiff_t bstride, vfloat16m4_t value, size_t vl) { + return vsse16_v_f16m4_m (mask, base, bstride, value, vl); +} + +// CHECK-RV64-LABEL: @test_vsse16_v_f16m8_m( +// CHECK-RV64-NEXT: entry: 
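The new _Float16 strided stores repeat the same pattern at SEW = 16. One plausible use, sketched under the signatures above (the two-channel buffer layout and the helper are hypothetical): writing one channel of an interleaved half-precision sample buffer.

#include <riscv_vector.h>
#include <stddef.h>

/* Hypothetical: store vl half-precision samples into channel ch
 * (0 or 1) of a 2-channel interleaved buffer; consecutive samples of
 * one channel sit 2 * sizeof(_Float16) = 4 bytes apart. */
void store_channel_f16(_Float16 *interleaved, int ch,
                       vfloat16m1_t samples, size_t vl) {
  vsse16_v_f16m1(interleaved + ch, 2 * (ptrdiff_t)sizeof(_Float16),
                 samples, vl);
}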
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsuxei.c
@@ -1,11 +1,11 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -17,7 +17,6 @@ return vsuxei8_v_i8mf8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -29,7 +28,6 @@ return vsuxei8_v_i8mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -41,7 +39,6 @@ return vsuxei8_v_i8mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -53,7 +50,6 @@ return vsuxei8_v_i8m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -65,7 +61,6 @@ return vsuxei8_v_i8m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -77,7 +72,6 @@ return vsuxei8_v_i8m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -89,7 +83,6 @@ return vsuxei8_v_i8m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -101,7 +94,6 @@ return vsuxei16_v_i8mf8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -113,7 +105,6 @@ return vsuxei16_v_i8mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -125,7 +116,6 @@ return vsuxei16_v_i8mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -137,7 +127,6 @@ return vsuxei16_v_i8m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT:
[[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -149,7 +138,6 @@ return vsuxei16_v_i8m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -161,7 +149,6 @@ return vsuxei16_v_i8m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -173,7 +160,6 @@ return vsuxei32_v_i8mf8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -185,7 +171,6 @@ return vsuxei32_v_i8mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -197,7 +182,6 @@ return vsuxei32_v_i8mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -209,7 +193,6 @@ return vsuxei32_v_i8m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -221,7 +204,6 @@ return vsuxei32_v_i8m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -233,7 +215,6 @@ return vsuxei64_v_i8mf8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -245,7 +226,6 @@ return vsuxei64_v_i8mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -257,7 +237,6 @@ return vsuxei64_v_i8mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -269,7 +248,6 @@ return vsuxei64_v_i8m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -281,7 +259,6 @@ return vsuxei8_v_i16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -293,7 +270,6 @@ return vsuxei8_v_i16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -305,7 +281,6 @@ return vsuxei8_v_i16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -317,7 +292,6 @@ return vsuxei8_v_i16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -329,7 +303,6 @@ return vsuxei8_v_i16m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -341,7 +314,6 @@ return vsuxei8_v_i16m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: 
@test_vsuxei16_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -353,7 +325,6 @@ return vsuxei16_v_i16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -365,7 +336,6 @@ return vsuxei16_v_i16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -377,7 +347,6 @@ return vsuxei16_v_i16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -389,7 +358,6 @@ return vsuxei16_v_i16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -401,7 +369,6 @@ return vsuxei16_v_i16m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -413,7 +380,6 @@ return vsuxei16_v_i16m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -425,7 +391,6 @@ return vsuxei32_v_i16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -437,7 +402,6 @@ return vsuxei32_v_i16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -449,7 +413,6 @@ return vsuxei32_v_i16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -461,7 +424,6 @@ return vsuxei32_v_i16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -473,7 +435,6 @@ return vsuxei32_v_i16m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -485,7 +446,6 @@ return vsuxei64_v_i16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -497,7 +457,6 @@ return vsuxei64_v_i16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -509,7 +468,6 @@ return vsuxei64_v_i16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -521,7 +479,6 @@ return vsuxei64_v_i16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -533,7 +490,6 @@ return vsuxei8_v_i32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ 
-545,7 +501,6 @@ return vsuxei8_v_i32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -557,7 +512,6 @@ return vsuxei8_v_i32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -569,7 +523,6 @@ return vsuxei8_v_i32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -581,7 +534,6 @@ return vsuxei8_v_i32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -593,7 +545,6 @@ return vsuxei16_v_i32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -605,7 +556,6 @@ return vsuxei16_v_i32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -617,7 +567,6 @@ return vsuxei16_v_i32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -629,7 +578,6 @@ return vsuxei16_v_i32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -641,7 +589,6 @@ return vsuxei16_v_i32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -653,7 +600,6 @@ return vsuxei32_v_i32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -665,7 +611,6 @@ return vsuxei32_v_i32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -677,7 +622,6 @@ return vsuxei32_v_i32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -689,7 +633,6 @@ return vsuxei32_v_i32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -701,7 +644,6 @@ return vsuxei32_v_i32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -713,7 +655,6 @@ return vsuxei64_v_i32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -725,7 +666,6 @@ return vsuxei64_v_i32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -737,7 +677,6 @@ return vsuxei64_v_i32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -749,7 +688,6 @@ return vsuxei64_v_i32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -761,7 +699,6 @@ return vsuxei8_v_i64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -773,7 +710,6 @@ return vsuxei8_v_i64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -785,7 +721,6 @@ return vsuxei8_v_i64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -797,7 +732,6 @@ return vsuxei8_v_i64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -809,7 +743,6 @@ return vsuxei16_v_i64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -821,7 +754,6 @@ return vsuxei16_v_i64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -833,7 +765,6 @@ return vsuxei16_v_i64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -845,7 +776,6 @@ return vsuxei16_v_i64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -857,7 +787,6 @@ return vsuxei32_v_i64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -869,7 +798,6 @@ return vsuxei32_v_i64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -881,7 +809,6 @@ return vsuxei32_v_i64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -893,7 +820,6 @@ return vsuxei32_v_i64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -905,7 +831,6 @@ return vsuxei64_v_i64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -917,7 +842,6 @@ return vsuxei64_v_i64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -929,7 +853,6 @@ return vsuxei64_v_i64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -941,7 +864,6 @@ return 
vsuxei64_v_i64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -953,7 +875,6 @@ return vsuxei8_v_u8mf8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -965,7 +886,6 @@ return vsuxei8_v_u8mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -977,7 +897,6 @@ return vsuxei8_v_u8mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -989,7 +908,6 @@ return vsuxei8_v_u8m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1001,7 +919,6 @@ return vsuxei8_v_u8m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1013,7 +930,6 @@ return vsuxei8_v_u8m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1025,7 +941,6 @@ return vsuxei8_v_u8m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1037,7 +952,6 @@ return vsuxei16_v_u8mf8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1049,7 +963,6 @@ return vsuxei16_v_u8mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1061,7 +974,6 @@ return vsuxei16_v_u8mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1073,7 +985,6 @@ return vsuxei16_v_u8m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1085,7 +996,6 @@ return vsuxei16_v_u8m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1097,7 +1007,6 @@ return vsuxei16_v_u8m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1109,7 +1018,6 @@ return vsuxei32_v_u8mf8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1121,7 +1029,6 @@ return vsuxei32_v_u8mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1133,7 +1040,6 @@ return vsuxei32_v_u8mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast i8* [[BASE:%.*]] to * @@ -1145,7 +1051,6 @@ return vsuxei32_v_u8m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1157,7 +1062,6 @@ return vsuxei32_v_u8m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1169,7 +1073,6 @@ return vsuxei64_v_u8mf8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1181,7 +1084,6 @@ return vsuxei64_v_u8mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1193,7 +1095,6 @@ return vsuxei64_v_u8mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -1205,7 +1106,6 @@ return vsuxei64_v_u8m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1217,7 +1117,6 @@ return vsuxei8_v_u16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1229,7 +1128,6 @@ return vsuxei8_v_u16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1241,7 +1139,6 @@ return vsuxei8_v_u16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1253,7 +1150,6 @@ return vsuxei8_v_u16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1265,7 +1161,6 @@ return vsuxei8_v_u16m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1277,7 +1172,6 @@ return vsuxei8_v_u16m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1289,7 +1183,6 @@ return vsuxei16_v_u16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1301,7 +1194,6 @@ return vsuxei16_v_u16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1313,7 +1205,6 @@ return vsuxei16_v_u16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1325,7 +1216,6 @@ return vsuxei16_v_u16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1337,7 +1227,6 @@ return vsuxei16_v_u16m4(base, bindex, value, vl); } -// // 
CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1349,7 +1238,6 @@ return vsuxei16_v_u16m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1361,7 +1249,6 @@ return vsuxei32_v_u16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1373,7 +1260,6 @@ return vsuxei32_v_u16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1385,7 +1271,6 @@ return vsuxei32_v_u16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1397,7 +1282,6 @@ return vsuxei32_v_u16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1409,7 +1293,6 @@ return vsuxei32_v_u16m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1421,7 +1304,6 @@ return vsuxei64_v_u16mf4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1433,7 +1315,6 @@ return vsuxei64_v_u16mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1445,7 +1326,6 @@ return vsuxei64_v_u16m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -1457,7 +1337,6 @@ return vsuxei64_v_u16m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1469,7 +1348,6 @@ return vsuxei8_v_u32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1481,7 +1359,6 @@ return vsuxei8_v_u32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1493,7 +1370,6 @@ return vsuxei8_v_u32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1505,7 +1381,6 @@ return vsuxei8_v_u32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1517,7 +1392,6 @@ return vsuxei8_v_u32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1529,7 +1403,6 @@ return vsuxei16_v_u32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1541,7 +1414,6 @@ return vsuxei16_v_u32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1553,7 +1425,6 @@ return vsuxei16_v_u32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1565,7 +1436,6 @@ return vsuxei16_v_u32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1577,7 +1447,6 @@ return vsuxei16_v_u32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1589,7 +1458,6 @@ return vsuxei32_v_u32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1601,7 +1469,6 @@ return vsuxei32_v_u32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1613,7 +1480,6 @@ return vsuxei32_v_u32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1625,7 +1491,6 @@ return vsuxei32_v_u32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1637,7 +1502,6 @@ return vsuxei32_v_u32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1649,7 +1513,6 @@ return vsuxei64_v_u32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1661,7 +1524,6 @@ return vsuxei64_v_u32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1673,7 +1535,6 @@ return vsuxei64_v_u32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -1685,7 +1546,6 @@ return vsuxei64_v_u32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1697,7 +1557,6 @@ return vsuxei8_v_u64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1709,7 +1568,6 @@ return vsuxei8_v_u64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1721,7 +1579,6 @@ return vsuxei8_v_u64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1733,7 +1590,6 @@ return vsuxei8_v_u64m8(base, 
bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1745,7 +1601,6 @@ return vsuxei16_v_u64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1757,7 +1612,6 @@ return vsuxei16_v_u64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1769,7 +1623,6 @@ return vsuxei16_v_u64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1781,7 +1634,6 @@ return vsuxei16_v_u64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1793,7 +1645,6 @@ return vsuxei32_v_u64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1805,7 +1656,6 @@ return vsuxei32_v_u64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1817,7 +1667,6 @@ return vsuxei32_v_u64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1829,7 +1678,6 @@ return vsuxei32_v_u64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1841,7 +1689,6 @@ return vsuxei64_v_u64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1853,7 +1700,6 @@ return vsuxei64_v_u64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1865,7 +1711,6 @@ return vsuxei64_v_u64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -1877,7 +1722,6 @@ return vsuxei64_v_u64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1889,7 +1733,6 @@ return vsuxei8_v_f32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1901,7 +1744,6 @@ return vsuxei8_v_f32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1913,7 +1755,6 @@ return vsuxei8_v_f32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1925,7 +1766,6 @@ return vsuxei8_v_f32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8( // CHECK-RV64-NEXT: entry: 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1937,7 +1777,6 @@ return vsuxei8_v_f32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1949,7 +1788,6 @@ return vsuxei16_v_f32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1961,7 +1799,6 @@ return vsuxei16_v_f32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1973,7 +1810,6 @@ return vsuxei16_v_f32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1985,7 +1821,6 @@ return vsuxei16_v_f32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -1997,7 +1832,6 @@ return vsuxei16_v_f32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2009,7 +1843,6 @@ return vsuxei32_v_f32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2021,7 +1854,6 @@ return vsuxei32_v_f32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2033,7 +1865,6 @@ return vsuxei32_v_f32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2045,7 +1876,6 @@ return vsuxei32_v_f32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2057,7 +1887,6 @@ return vsuxei32_v_f32m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2069,7 +1898,6 @@ return vsuxei64_v_f32mf2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2081,7 +1909,6 @@ return vsuxei64_v_f32m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2093,7 +1920,6 @@ return vsuxei64_v_f32m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * @@ -2105,7 +1931,6 @@ return vsuxei64_v_f32m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2117,7 +1942,6 @@ return vsuxei8_v_f64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] 
to * @@ -2129,7 +1953,6 @@ return vsuxei8_v_f64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2141,7 +1964,6 @@ return vsuxei8_v_f64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2153,7 +1975,6 @@ return vsuxei8_v_f64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2165,7 +1986,6 @@ return vsuxei16_v_f64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2177,7 +1997,6 @@ return vsuxei16_v_f64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2189,7 +2008,6 @@ return vsuxei16_v_f64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2201,7 +2019,6 @@ return vsuxei16_v_f64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2213,7 +2030,6 @@ return vsuxei32_v_f64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2225,7 +2041,6 @@ return vsuxei32_v_f64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2237,7 +2052,6 @@ return vsuxei32_v_f64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2249,7 +2063,6 @@ return vsuxei32_v_f64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2261,7 +2074,6 @@ return vsuxei64_v_f64m1(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2273,7 +2085,6 @@ return vsuxei64_v_f64m2(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2285,7 +2096,6 @@ return vsuxei64_v_f64m4(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * @@ -2297,7 +2107,6 @@ return vsuxei64_v_f64m8(base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2309,7 +2118,6 @@ return vsuxei8_v_i8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2321,7 +2129,6 @@ return 
vsuxei8_v_i8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2333,7 +2140,6 @@ return vsuxei8_v_i8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2345,7 +2151,6 @@ return vsuxei8_v_i8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2357,7 +2162,6 @@ return vsuxei8_v_i8m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2369,7 +2173,6 @@ return vsuxei8_v_i8m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i8m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2381,7 +2184,6 @@ return vsuxei8_v_i8m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2393,7 +2195,6 @@ return vsuxei16_v_i8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2405,7 +2206,6 @@ return vsuxei16_v_i8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2417,7 +2217,6 @@ return vsuxei16_v_i8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2429,7 +2228,6 @@ return vsuxei16_v_i8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2441,7 +2239,6 @@ return vsuxei16_v_i8m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i8m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2453,7 +2250,6 @@ return vsuxei16_v_i8m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2465,7 +2261,6 @@ return vsuxei32_v_i8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2477,7 +2272,6 @@ return vsuxei32_v_i8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2489,7 +2283,6 @@ return vsuxei32_v_i8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2501,7 +2294,6 @@ return vsuxei32_v_i8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i8m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2513,7 
+2305,6 @@ return vsuxei32_v_i8m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2525,7 +2316,6 @@ return vsuxei64_v_i8mf8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2537,7 +2327,6 @@ return vsuxei64_v_i8mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i8mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2549,7 +2338,6 @@ return vsuxei64_v_i8mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i8m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * @@ -2561,7 +2349,6 @@ return vsuxei64_v_i8m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2573,7 +2360,6 @@ return vsuxei8_v_i16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2585,7 +2371,6 @@ return vsuxei8_v_i16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2597,7 +2382,6 @@ return vsuxei8_v_i16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2609,7 +2393,6 @@ return vsuxei8_v_i16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2621,7 +2404,6 @@ return vsuxei8_v_i16m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2633,7 +2415,6 @@ return vsuxei8_v_i16m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2646,7 +2427,6 @@ return vsuxei16_v_i16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2659,7 +2439,6 @@ return vsuxei16_v_i16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2671,7 +2450,6 @@ return vsuxei16_v_i16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2683,7 +2461,6 @@ return vsuxei16_v_i16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2695,7 +2472,6 @@ return vsuxei16_v_i16m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2707,7 +2483,6 @@ return vsuxei16_v_i16m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2720,7 +2495,6 @@ return vsuxei32_v_i16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2732,7 +2506,6 @@ return vsuxei32_v_i16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2744,7 +2517,6 @@ return vsuxei32_v_i16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2756,7 +2528,6 @@ return vsuxei32_v_i16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2768,7 +2539,6 @@ return vsuxei32_v_i16m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2780,7 +2550,6 @@ return vsuxei64_v_i16mf4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2792,7 +2561,6 @@ return vsuxei64_v_i16mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2804,7 +2572,6 @@ return vsuxei64_v_i16m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * @@ -2816,7 +2583,6 @@ return vsuxei64_v_i16m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2828,7 +2594,6 @@ return vsuxei8_v_i32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2840,7 +2605,6 @@ return vsuxei8_v_i32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2852,7 +2616,6 @@ return vsuxei8_v_i32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2864,7 +2627,6 @@ return vsuxei8_v_i32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2876,7 +2638,6 @@ return vsuxei8_v_i32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2889,7 +2650,6 @@ return vsuxei16_v_i32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: 
@test_vsuxei16_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2901,7 +2661,6 @@ return vsuxei16_v_i32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2913,7 +2672,6 @@ return vsuxei16_v_i32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2925,7 +2683,6 @@ return vsuxei16_v_i32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei16_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2937,7 +2694,6 @@ return vsuxei16_v_i32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2950,7 +2706,6 @@ return vsuxei32_v_i32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2962,7 +2717,6 @@ return vsuxei32_v_i32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2974,7 +2728,6 @@ return vsuxei32_v_i32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2986,7 +2739,6 @@ return vsuxei32_v_i32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei32_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -2998,7 +2750,6 @@ return vsuxei32_v_i32m8_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3010,7 +2761,6 @@ return vsuxei64_v_i32mf2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3022,7 +2772,6 @@ return vsuxei64_v_i32m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3034,7 +2783,6 @@ return vsuxei64_v_i32m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei64_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * @@ -3046,7 +2794,6 @@ return vsuxei64_v_i32m4_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3058,7 +2805,6 @@ return vsuxei8_v_i64m1_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3070,7 +2816,6 @@ return vsuxei8_v_i64m2_m(mask, base, bindex, value, vl); } -// // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * @@ -3082,7 +2827,6 @@ return 
vsuxei8_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3094,7 +2838,6 @@
   return vsuxei8_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3106,7 +2849,6 @@
   return vsuxei16_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3118,7 +2860,6 @@
   return vsuxei16_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3130,7 +2871,6 @@
   return vsuxei16_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3142,7 +2882,6 @@
   return vsuxei16_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3154,7 +2893,6 @@
   return vsuxei32_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3166,7 +2904,6 @@
   return vsuxei32_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3178,7 +2915,6 @@
   return vsuxei32_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3190,7 +2926,6 @@
   return vsuxei32_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -3202,7 +2937,6 @@
   return vsuxei64_v_i64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -3214,7 +2948,6 @@
   return vsuxei64_v_i64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -3226,7 +2959,6 @@
   return vsuxei64_v_i64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -3238,7 +2970,6 @@
   return vsuxei64_v_i64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3250,7 +2981,6 @@
   return vsuxei8_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3262,7 +2992,6 @@
   return vsuxei8_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3274,7 +3003,6 @@
   return vsuxei8_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3286,7 +3014,6 @@
   return vsuxei8_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3298,7 +3025,6 @@
   return vsuxei8_v_u8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -3310,7 +3036,6 @@
   return vsuxei8_v_u8m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
@@ -3322,7 +3047,6 @@
   return vsuxei8_v_u8m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3334,7 +3058,6 @@
   return vsuxei16_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3346,7 +3069,6 @@
   return vsuxei16_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3358,7 +3080,6 @@
   return vsuxei16_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3370,7 +3091,6 @@
   return vsuxei16_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3382,7 +3102,6 @@
   return vsuxei16_v_u8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
@@ -3394,7 +3113,6 @@
   return vsuxei16_v_u8m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3406,7 +3124,6 @@
   return vsuxei32_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3418,7 +3135,6 @@
   return vsuxei32_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3430,7 +3146,6 @@
   return vsuxei32_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3442,7 +3157,6 @@
   return vsuxei32_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
@@ -3454,7 +3168,6 @@
   return vsuxei32_v_u8m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
@@ -3466,7 +3179,6 @@
   return vsuxei64_v_u8mf8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
@@ -3478,7 +3190,6 @@
   return vsuxei64_v_u8mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
@@ -3490,7 +3201,6 @@
   return vsuxei64_v_u8mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
@@ -3502,7 +3212,6 @@
   return vsuxei64_v_u8m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3514,7 +3223,6 @@
   return vsuxei8_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3526,7 +3234,6 @@
   return vsuxei8_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3538,7 +3245,6 @@
   return vsuxei8_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3550,7 +3256,6 @@
   return vsuxei8_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3562,7 +3267,6 @@
   return vsuxei8_v_u16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -3574,7 +3278,6 @@
   return vsuxei8_v_u16m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3587,7 +3290,6 @@
   return vsuxei16_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3600,7 +3302,6 @@
   return vsuxei16_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3612,7 +3313,6 @@
   return vsuxei16_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3624,7 +3324,6 @@
   return vsuxei16_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3636,7 +3335,6 @@
   return vsuxei16_v_u16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
@@ -3648,7 +3346,6 @@
   return vsuxei16_v_u16m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3661,7 +3358,6 @@
   return vsuxei32_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3674,7 +3370,6 @@
   return vsuxei32_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3686,7 +3381,6 @@
   return vsuxei32_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3698,7 +3392,6 @@
   return vsuxei32_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
@@ -3710,7 +3403,6 @@
   return vsuxei32_v_u16m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
@@ -3723,7 +3415,6 @@
   return vsuxei64_v_u16mf4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
@@ -3736,7 +3427,6 @@
   return vsuxei64_v_u16mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
@@ -3748,7 +3438,6 @@
   return vsuxei64_v_u16m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
@@ -3760,7 +3449,6 @@
   return vsuxei64_v_u16m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3772,7 +3460,6 @@
   return vsuxei8_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3784,7 +3471,6 @@
   return vsuxei8_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3796,7 +3482,6 @@
   return vsuxei8_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3808,7 +3493,6 @@
   return vsuxei8_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3820,7 +3504,6 @@
   return vsuxei8_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3833,7 +3516,6 @@
   return vsuxei16_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3846,7 +3528,6 @@
   return vsuxei16_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3858,7 +3539,6 @@
   return vsuxei16_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3870,7 +3550,6 @@
   return vsuxei16_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3882,7 +3561,6 @@
   return vsuxei16_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3895,7 +3573,6 @@
   return vsuxei32_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3907,7 +3584,6 @@
   return vsuxei32_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3919,7 +3595,6 @@
   return vsuxei32_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3931,7 +3606,6 @@
   return vsuxei32_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
@@ -3943,7 +3617,6 @@
   return vsuxei32_v_u32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
@@ -3956,7 +3629,6 @@
   return vsuxei64_v_u32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
@@ -3968,7 +3640,6 @@
   return vsuxei64_v_u32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
@@ -3980,7 +3651,6 @@
   return vsuxei64_v_u32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
@@ -3992,7 +3662,6 @@
   return vsuxei64_v_u32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4004,7 +3673,6 @@
   return vsuxei8_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4016,7 +3684,6 @@
   return vsuxei8_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4028,7 +3695,6 @@
   return vsuxei8_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4040,7 +3706,6 @@
   return vsuxei8_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4053,7 +3718,6 @@
   return vsuxei16_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4066,7 +3730,6 @@
   return vsuxei16_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4078,7 +3741,6 @@
   return vsuxei16_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4090,7 +3752,6 @@
   return vsuxei16_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4103,7 +3764,6 @@
   return vsuxei32_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4115,7 +3775,6 @@
   return vsuxei32_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4127,7 +3786,6 @@
   return vsuxei32_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4139,7 +3797,6 @@
   return vsuxei32_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
@@ -4151,7 +3808,6 @@
   return vsuxei64_v_u64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
@@ -4163,7 +3819,6 @@
   return vsuxei64_v_u64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
@@ -4175,7 +3830,6 @@
   return vsuxei64_v_u64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
@@ -4187,7 +3841,6 @@
   return vsuxei64_v_u64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4199,7 +3852,6 @@
   return vsuxei8_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4211,7 +3863,6 @@
   return vsuxei8_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4223,7 +3874,6 @@
   return vsuxei8_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4235,7 +3885,6 @@
   return vsuxei8_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -4247,7 +3896,6 @@
   return vsuxei8_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4259,7 +3907,6 @@
   return vsuxei16_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4271,7 +3918,6 @@
   return vsuxei16_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4283,7 +3929,6 @@
   return vsuxei16_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4295,7 +3940,6 @@
   return vsuxei16_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -4307,7 +3951,6 @@
   return vsuxei16_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4319,7 +3962,6 @@
   return vsuxei32_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4331,7 +3973,6 @@
   return vsuxei32_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4343,7 +3984,6 @@
   return vsuxei32_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4355,7 +3995,6 @@
   return vsuxei32_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
@@ -4367,7 +4006,6 @@
   return vsuxei32_v_f32m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
@@ -4379,7 +4017,6 @@
   return vsuxei64_v_f32mf2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
@@ -4391,7 +4028,6 @@
   return vsuxei64_v_f32m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
@@ -4403,7 +4039,6 @@
   return vsuxei64_v_f32m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
@@ -4415,7 +4050,6 @@
   return vsuxei64_v_f32m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4427,7 +4061,6 @@
   return vsuxei8_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4439,7 +4072,6 @@
   return vsuxei8_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4451,7 +4083,6 @@
   return vsuxei8_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei8_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4463,7 +4094,6 @@
   return vsuxei8_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4475,7 +4105,6 @@
   return vsuxei16_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4487,7 +4116,6 @@
   return vsuxei16_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4499,7 +4127,6 @@
   return vsuxei16_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei16_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4511,7 +4138,6 @@
   return vsuxei16_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4523,7 +4149,6 @@
   return vsuxei32_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4535,7 +4160,6 @@
   return vsuxei32_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4547,7 +4171,6 @@
   return vsuxei32_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei32_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4559,7 +4182,6 @@
   return vsuxei32_v_f64m8_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
@@ -4571,7 +4193,6 @@
   return vsuxei64_v_f64m1_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
@@ -4583,7 +4204,6 @@
   return vsuxei64_v_f64m2_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
@@ -4595,7 +4215,6 @@
   return vsuxei64_v_f64m4_m(mask, base, bindex, value, vl);
 }
 
-//
 // CHECK-RV64-LABEL: @test_vsuxei64_v_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
@@ -4606,3 +4225,423 @@
                           vfloat64m8_t value, size_t vl) {
   return vsuxei64_v_f64m8_m(mask, base, bindex, value, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16mf4 (_Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei8_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16mf2 (_Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei8_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m1 (_Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei8_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m2 (_Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei8_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m4 (_Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei8_v_f16m4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m8 (_Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei8_v_f16m8 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16mf4 (_Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei16_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16mf2 (_Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei16_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m1 (_Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei16_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m2 (_Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei16_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m4 (_Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei16_v_f16m4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m8 (_Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei16_v_f16m8 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16mf4 (_Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei32_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16mf2 (_Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei32_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m1 (_Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei32_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m2 (_Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei32_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m4 (_Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei32_v_f16m4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16mf4 (_Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei64_v_f16mf4 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16mf2 (_Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei64_v_f16mf2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16m1 (_Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei64_v_f16m1 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16m2 (_Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei64_v_f16m2 (base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i8> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint8mf8_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei8_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i8> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint8mf4_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei8_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i8> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint8mf2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei8_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i8> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint8m1_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei8_v_f16m2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i8> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint8m2_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei8_v_f16m4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei8_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i8> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei8_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint8m4_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei8_v_f16m8_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i16> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint16mf4_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei16_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i16> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint16mf2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei16_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i16> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint16m1_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei16_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i16> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint16m2_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei16_v_f16m2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i16> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint16m4_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei16_v_f16m4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei16_v_f16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 32 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16.i64(<vscale x 32 x half> [[VALUE:%.*]], <vscale x 32 x half>* [[TMP0]], <vscale x 32 x i16> [[BINDEX:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei16_v_f16m8_m (vbool2_t mask, _Float16 *base, vuint16m8_t bindex, vfloat16m8_t value, size_t vl) {
+  return vsuxei16_v_f16m8_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint32mf2_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei32_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i32> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint32m1_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei32_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i32> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint32m2_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei32_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i32> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint32m4_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei32_v_f16m2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei32_v_f16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 16 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32.i64(<vscale x 16 x half> [[VALUE:%.*]], <vscale x 16 x half>* [[TMP0]], <vscale x 16 x i32> [[BINDEX:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei32_v_f16m4_m (vbool4_t mask, _Float16 *base, vuint32m8_t bindex, vfloat16m4_t value, size_t vl) {
+  return vsuxei32_v_f16m4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 1 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64.i64(<vscale x 1 x half> [[VALUE:%.*]], <vscale x 1 x half>* [[TMP0]], <vscale x 1 x i64> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16mf4_m (vbool64_t mask, _Float16 *base, vuint64m1_t bindex, vfloat16mf4_t value, size_t vl) {
+  return vsuxei64_v_f16mf4_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 2 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64.i64(<vscale x 2 x half> [[VALUE:%.*]], <vscale x 2 x half>* [[TMP0]], <vscale x 2 x i64> [[BINDEX:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16mf2_m (vbool32_t mask, _Float16 *base, vuint64m2_t bindex, vfloat16mf2_t value, size_t vl) {
+  return vsuxei64_v_f16mf2_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 4 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64.i64(<vscale x 4 x half> [[VALUE:%.*]], <vscale x 4 x half>* [[TMP0]], <vscale x 4 x i64> [[BINDEX:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16m1_m (vbool16_t mask, _Float16 *base, vuint64m4_t bindex, vfloat16m1_t value, size_t vl) {
+  return vsuxei64_v_f16m1_m (mask, base, bindex, value, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsuxei64_v_f16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast half* [[BASE:%.*]] to <vscale x 8 x half>*
+// CHECK-RV64-NEXT:    call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64.i64(<vscale x 8 x half> [[VALUE:%.*]], <vscale x 8 x half>* [[TMP0]], <vscale x 8 x i64> [[BINDEX:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vsuxei64_v_f16m2_m (vbool8_t mask, _Float16 *base, vuint64m8_t bindex, vfloat16m2_t value, size_t vl) {
+  return vsuxei64_v_f16m2_m (mask, base, bindex, value, vl);
+}
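For context only, a minimal usage sketch of one of the intrinsics exercised above; it is not part of the patch. It assumes a toolchain whose riscv_vector.h provides this intrinsic set and the same target features as the RUN lines; vsetvl_e16m1 and the vle16 loads are assumed companion intrinsics, not covered by this test.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Scatter n _Float16 elements from src into base at the given byte
// offsets, a strip-mined loop around the unmasked 16-bit-indexed store.
void scatter_f16(_Float16 *base, const uint16_t *byte_offsets,
                 const _Float16 *src, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e16m1(n - i);                           // assumed helper
    vuint16m1_t bindex = vle16_v_u16m1(byte_offsets + i, vl);  // assumed load
    vfloat16m1_t value = vle16_v_f16m1(src + i, vl);           // assumed load
    vsuxei16_v_f16m1(base, bindex, value, vl);                 // intrinsic tested above
    i += vl;
  }
}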