diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -169,6 +169,12 @@
   // parameter of the unmasked version. k can't be the mask operand's position.
   list<int> IntrinsicTypes = [];
 
+  // When the parameter order of the clang builtin does not match the order of
+  // the C/C++ API, we use a permutation index to map each builtin operand to
+  // its C/C++ counterpart. The indices refer to the unmasked version's
+  // parameters without the VL operand. If empty, the default is [0, 1, 2, ...].
+  list<int> PermuteOperands = [];
+
   // If these names are not empty, this is the ID of the LLVM intrinsic
   // we want to lower to.
   string IRName = NAME;
@@ -204,6 +210,55 @@
   }
 }
 
+class IsFloat<string type> {
+  bit val = !or(!eq(type, "h"), !eq(type, "f"), !eq(type, "d"));
+}
+
+multiclass RVVVLEBuiltin<list<string> types> {
+  let Name = NAME # "_v",
+      IRName = "vle",
+      IRNameMask = "vle_mask",
+      HasGeneric = false,
+      ManualCodegen = [{
+        IntrinsicTypes = {ResultType, Ops[1]->getType()};
+        Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+      }],
+      ManualCodegenMask = [{
+        IntrinsicTypes = {ResultType, Ops[3]->getType()};
+        Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
+      }] in {
+    foreach type = types in {
+      def : RVVBuiltin<"v", "vPCe", type>;
+      if !not(IsFloat<type>.val) then {
+        def : RVVBuiltin<"Uv", "UvPCUe", type>;
+      }
+    }
+  }
+}
+
+multiclass RVVVSEBuiltin<list<string> types> {
+  let Name = NAME # "_v",
+      IRName = "vse",
+      IRNameMask = "vse_mask",
+      HasMaskedOffOperand = false,
+      PermuteOperands = [1, 0], // C/C++ operand order: (ptr, value, vl). Builtin order: (value, ptr, vl).
+      HasGeneric = false,
+      ManualCodegen = [{
+        Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+        IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
+      }],
+      ManualCodegenMask = [{
+        Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+        IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
+      }] in {
+    foreach type = types in {
+      def : RVVBuiltin<"v", "0vPe", type>;
+      if !not(IsFloat<type>.val) then {
+        def : RVVBuiltin<"Uv", "0UvPUe", type>;
+      }
+    }
+  }
+}
+
 // 6. Configuration-Setting Instructions
 // 6.1. vsetvli/vsetvl instructions
@@ -278,6 +333,18 @@
   def vsetvlimax : RVVBuiltin<"", "zKzKz", "i">;
 }
 
+// 7. Vector Loads and Stores
+// 7.4. Vector Unit-Stride Instructions
+defm vle8: RVVVLEBuiltin<["c"]>;
+defm vle16: RVVVLEBuiltin<["s"]>;
+defm vle32: RVVVLEBuiltin<["i","f"]>;
+defm vle64: RVVVLEBuiltin<["l","d"]>;
+
+defm vse8 : RVVVSEBuiltin<["c"]>;
+defm vse16: RVVVSEBuiltin<["s"]>;
+defm vse32: RVVVSEBuiltin<["i","f"]>;
+defm vse64: RVVVSEBuiltin<["l","d"]>;
+
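As a sanity check on the intended C-level surface, here is a minimal usage sketch. The `vse8_v_i8m1` name and its (base, value, vl) order are inferred from the `NAME # "_v"` scheme and the PermuteOperands comment above; the vse tests are not part of this file, so treat them as assumptions rather than tested API.

```c
#include <riscv_vector.h>
#include <stddef.h>

// Copy vl elements using the unit-stride intrinsics defined above.
// Assumes vl is a legal vector length for SEW=8, LMUL=1.
void copy_i8m1(int8_t *dst, const int8_t *src, size_t vl) {
  vint8m1_t v = vle8_v_i8m1(src, vl); // lowers to @llvm.riscv.vle
  // The C/C++ order is (base, value, vl); PermuteOperands = [1, 0] swaps
  // the first two arguments into the builtin's (value, base, vl) order.
  vse8_v_i8m1(dst, v, vl);            // lowers to @llvm.riscv.vse
}
```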
 // 12. Vector Integer Arithmetic Instructions
 // 12.1. Vector Single-Width Integer Add and Subtract
 defm vadd : RVVBinBuiltinSet<"vadd", "csil",
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
new file
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vle.c
@@ -0,0 +1,1706 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8mf8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i32(<vscale x 1 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8.i64(<vscale x 1 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP1]]
+//
+vint8mf8_t test_vle8_v_i8mf8(const int8_t *base, size_t vl) {
+  return vle8_v_i8mf8(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8.i32(<vscale x 2 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8.i64(<vscale x 2 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP1]]
+//
+vint8mf4_t test_vle8_v_i8mf4(const int8_t *base, size_t vl) {
+  return vle8_v_i8mf4(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8.i32(<vscale x 4 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8.i64(<vscale x 4 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP1]]
+//
+vint8mf2_t test_vle8_v_i8mf2(const int8_t *base, size_t vl) {
+  return vle8_v_i8mf2(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i32(<vscale x 8 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP1]]
+//
+vint8m1_t test_vle8_v_i8m1(const int8_t *base, size_t vl) {
+  return vle8_v_i8m1(base, vl);
+}
+
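The masked `_m` variants exercised further down in this file prepend (mask, maskedoff) parameters. A hedged sketch of their semantics, matching the `vle_mask` lowering in the checks below (inactive lanes come from maskedoff):

```c
#include <riscv_vector.h>
#include <stddef.h>

// Masked unit-stride load: lanes with mask bit 1 are loaded from base,
// lanes with mask bit 0 keep fallback's value. The (mask, maskedoff,
// base, vl) order follows the _m tests below.
vint8m1_t load_or_fallback(vbool8_t mask, vint8m1_t fallback,
                           const int8_t *base, size_t vl) {
  return vle8_v_i8m1_m(mask, fallback, base, vl); // @llvm.riscv.vle.mask
}
```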
+// CHECK-RV32-LABEL: @test_vle8_v_i8m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8.i32(<vscale x 16 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8.i64(<vscale x 16 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
+//
+vint8m2_t test_vle8_v_i8m2(const int8_t *base, size_t vl) {
+  return vle8_v_i8m2(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i32(<vscale x 32 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8.i64(<vscale x 32 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP1]]
+//
+vint8m4_t test_vle8_v_i8m4(const int8_t *base, size_t vl) {
+  return vle8_v_i8m4(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle8_v_i8m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i32(<vscale x 64 x i8>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle8_v_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP1]]
+//
+vint8m8_t test_vle8_v_i8m8(const int8_t *base, size_t vl) {
+  return vle8_v_i8m8(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16.i32(<vscale x 1 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16.i64(<vscale x 1 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP1]]
+//
+vint16mf4_t test_vle16_v_i16mf4(const int16_t *base, size_t vl) {
+  return vle16_v_i16mf4(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16.i32(<vscale x 2 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16.i64(<vscale x 2 x i16>* [[TMP0]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP1]]
+//
+vint16mf2_t test_vle16_v_i16mf2(const int16_t *base, size_t vl) {
+  return vle16_v_i16mf2(base, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vle16_v_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16.i32(<vscale x 4 x i16>* [[TMP0]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP1]]
+//
+// CHECK-RV64-LABEL: @test_vle16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vle16_v_i16m1(const int16_t *base, size_t vl) { + return vle16_v_i16m1(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i16.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vle16_v_i16m2(const int16_t *base, size_t vl) { + return vle16_v_i16m2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i16.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vle16_v_i16m4(const int16_t *base, size_t vl) { + return vle16_v_i16m4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i16.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vle16_v_i16m8(const int16_t *base, size_t vl) { + return vle16_v_i16m8(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vle32_v_i32mf2(const int32_t *base, size_t vl) { + return vle32_v_i32mf2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t 
test_vle32_v_i32m1(const int32_t *base, size_t vl) { + return vle32_v_i32m1(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vle32_v_i32m2(const int32_t *base, size_t vl) { + return vle32_v_i32m2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vle32_v_i32m4(const int32_t *base, size_t vl) { + return vle32_v_i32m4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vle32_v_i32m8(const int32_t *base, size_t vl) { + return vle32_v_i32m8(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vle64_v_i64m1(const int64_t *base, size_t vl) { + return vle64_v_i64m1(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vle64_v_i64m2(const int64_t *base, size_t vl) { + return vle64_v_i64m2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i64.i32(* 
[[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vle64_v_i64m4(const int64_t *base, size_t vl) { + return vle64_v_i64m4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vle64_v_i64m8(const int64_t *base, size_t vl) { + return vle64_v_i64m8(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i8.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vle8_v_u8mf8(const uint8_t *base, size_t vl) { + return vle8_v_u8mf8(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i8.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vle8_v_u8mf4(const uint8_t *base, size_t vl) { + return vle8_v_u8mf4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i8.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vle8_v_u8mf2(const uint8_t *base, size_t vl) { + return vle8_v_u8mf2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i8.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] 
+// +vuint8m1_t test_vle8_v_u8m1(const uint8_t *base, size_t vl) { + return vle8_v_u8m1(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i8.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vle8_v_u8m2(const uint8_t *base, size_t vl) { + return vle8_v_u8m2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i8.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vle8_v_u8m4(const uint8_t *base, size_t vl) { + return vle8_v_u8m4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv64i8.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv64i8.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vle8_v_u8m8(const uint8_t *base, size_t vl) { + return vle8_v_u8m8(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i16.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vle16_v_u16mf4(const uint16_t *base, size_t vl) { + return vle16_v_u16mf4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i16.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vle16_v_u16mf2(const uint16_t *base, size_t vl) { + return vle16_v_u16mf2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i16.i32(* 
[[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vle16_v_u16m1(const uint16_t *base, size_t vl) { + return vle16_v_u16m1(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i16.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vle16_v_u16m2(const uint16_t *base, size_t vl) { + return vle16_v_u16m2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i16.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vle16_v_u16m4(const uint16_t *base, size_t vl) { + return vle16_v_u16m4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i16.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv32i16.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vle16_v_u16m8(const uint16_t *base, size_t vl) { + return vle16_v_u16m8(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vle32_v_u32mf2(const uint32_t *base, size_t vl) { + return vle32_v_u32mf2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i32.i64(* [[TMP0]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vle32_v_u32m1(const uint32_t *base, size_t vl) { + return vle32_v_u32m1(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vle32_v_u32m2(const uint32_t *base, size_t vl) { + return vle32_v_u32m2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vle32_v_u32m4(const uint32_t *base, size_t vl) { + return vle32_v_u32m4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16i32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vle32_v_u32m8(const uint32_t *base, size_t vl) { + return vle32_v_u32m8(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vle64_v_u64m1(const uint64_t *base, size_t vl) { + return vle64_v_u64m1(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vle64_v_u64m2(const uint64_t *base, size_t vl) { + return vle64_v_u64m2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] 
to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vle64_v_u64m4(const uint64_t *base, size_t vl) { + return vle64_v_u64m4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8i64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vle64_v_u64m8(const uint64_t *base, size_t vl) { + return vle64_v_u64m8(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vle32_v_f32mf2(const float *base, size_t vl) { + return vle32_v_f32mf2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vle32_v_f32m1(const float *base, size_t vl) { + return vle32_v_f32m1(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vle32_v_f32m2(const float *base, size_t vl) { + return vle32_v_f32m2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * 
+// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vle32_v_f32m4(const float *base, size_t vl) { + return vle32_v_f32m4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f32.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv16f32.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vle32_v_f32m8(const float *base, size_t vl) { + return vle32_v_f32m8(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv1f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vle64_v_f64m1(const double *base, size_t vl) { + return vle64_v_f64m1(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv2f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vle64_v_f64m2(const double *base, size_t vl) { + return vle64_v_f64m2(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv4f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vle64_v_f64m4(const double *base, size_t vl) { + return vle64_v_f64m4(base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f64.i32(* [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.nxv8f64.i64(* [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vle64_v_f64m8(const double *base, size_t vl) { + return vle64_v_f64m8(base, vl); +} + +// CHECK-RV32-LABEL: 
@test_vle8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf8_t test_vle8_v_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, const int8_t *base, size_t vl) { + return vle8_v_i8mf8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf4_t test_vle8_v_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, const int8_t *base, size_t vl) { + return vle8_v_i8mf4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8mf2_t test_vle8_v_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, const int8_t *base, size_t vl) { + return vle8_v_i8mf2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m1_t test_vle8_v_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, const int8_t *base, size_t vl) { + return vle8_v_i8m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m2_t test_vle8_v_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, const int8_t *base, size_t vl) { + return vle8_v_i8m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m4_t test_vle8_v_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, const int8_t *base, size_t vl) { + return vle8_v_i8m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_i8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_i8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint8m8_t test_vle8_v_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, const int8_t *base, size_t vl) { + return vle8_v_i8m8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf4_t test_vle16_v_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, const int16_t *base, size_t vl) { + return vle16_v_i16mf4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16mf2_t test_vle16_v_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, const int16_t *base, size_t vl) { + return vle16_v_i16mf2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m1_t test_vle16_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, const int16_t *base, size_t vl) { + return vle16_v_i16m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m2_t test_vle16_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, const int16_t *base, size_t vl) { + return vle16_v_i16m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m4_t test_vle16_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, const int16_t *base, size_t vl) { + return vle16_v_i16m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_i16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint16m8_t test_vle16_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, const int16_t *base, size_t vl) { + return vle16_v_i16m8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32mf2_t test_vle32_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, const int32_t *base, size_t vl) { + return vle32_v_i32mf2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m1_t test_vle32_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, const int32_t *base, size_t vl) { + return vle32_v_i32m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m2_t test_vle32_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, const int32_t *base, size_t vl) { + return vle32_v_i32m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m4_t test_vle32_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, const int32_t *base, size_t vl) { + return vle32_v_i32m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_i32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint32m8_t test_vle32_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, const int32_t *base, size_t vl) { + return vle32_v_i32m8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m1_m( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m1_t test_vle64_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, const int64_t *base, size_t vl) { + return vle64_v_i64m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m2_t test_vle64_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, const int64_t *base, size_t vl) { + return vle64_v_i64m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m4_t test_vle64_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, const int64_t *base, size_t vl) { + return vle64_v_i64m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_i64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vint64m8_t test_vle64_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, const int64_t *base, size_t vl) { + return vle64_v_i64m8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* 
[[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf8_t test_vle8_v_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, const uint8_t *base, size_t vl) { + return vle8_v_u8mf8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf4_t test_vle8_v_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, const uint8_t *base, size_t vl) { + return vle8_v_u8mf4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8mf2_t test_vle8_v_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, const uint8_t *base, size_t vl) { + return vle8_v_u8mf2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m1_t test_vle8_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, const uint8_t *base, size_t vl) { + return vle8_v_u8m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m2_t test_vle8_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, const uint8_t *base, size_t vl) { + return vle8_v_u8m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m4_m( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m4_t test_vle8_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, const uint8_t *base, size_t vl) { + return vle8_v_u8m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle8_v_u8m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle8_v_u8m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv64i8.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint8m8_t test_vle8_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, const uint8_t *base, size_t vl) { + return vle8_v_u8m8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf4_t test_vle16_v_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, const uint16_t *base, size_t vl) { + return vle16_v_u16mf4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16mf2_t test_vle16_v_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, const uint16_t *base, size_t vl) { + return vle16_v_u16mf2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m1_t test_vle16_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, const uint16_t *base, size_t vl) { + return vle16_v_u16m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m2_t test_vle16_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, const uint16_t *base, size_t vl) { + return vle16_v_u16m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m4_t test_vle16_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, const uint16_t *base, size_t vl) { + return vle16_v_u16m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle16_v_u16m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle16_v_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv32i16.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint16m8_t test_vle16_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, const uint16_t *base, size_t vl) { + return vle16_v_u16m8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32mf2_t test_vle32_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, const uint32_t *base, size_t vl) { + return vle32_v_u32mf2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: 
@test_vle32_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m1_t test_vle32_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, const uint32_t *base, size_t vl) { + return vle32_v_u32m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m2_t test_vle32_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, const uint32_t *base, size_t vl) { + return vle32_v_u32m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m4_t test_vle32_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, const uint32_t *base, size_t vl) { + return vle32_v_u32m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16i32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint32m8_t test_vle32_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, const uint32_t *base, size_t vl) { + return vle32_v_u32m8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m1_t test_vle64_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, const uint64_t *base, size_t vl) { + return vle64_v_u64m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m2_t test_vle64_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, const uint64_t *base, size_t vl) { + return vle64_v_u64m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m4_t test_vle64_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, const uint64_t *base, size_t vl) { + return vle64_v_u64m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8i64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vuint64m8_t test_vle64_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, const uint64_t *base, size_t vl) { + return vle64_v_u64m8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32mf2_t test_vle32_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, const float *base, size_t vl) { + return 
vle32_v_f32mf2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m1_t test_vle32_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, const float *base, size_t vl) { + return vle32_v_f32m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m2_t test_vle32_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, const float *base, size_t vl) { + return vle32_v_f32m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m4_t test_vle32_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, const float *base, size_t vl) { + return vle32_v_f32m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle32_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle32_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat32m8_t test_vle32_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, const float *base, size_t vl) { + return vle32_v_f32m8_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// 
+// CHECK-RV64-LABEL: @test_vle64_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m1_t test_vle64_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, const double *base, size_t vl) { + return vle64_v_f64m1_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m2_t test_vle64_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, const double *base, size_t vl) { + return vle64_v_f64m2_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m4_t test_vle64_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, const double *base, size_t vl) { + return vle64_v_f64m4_m(mask, maskedoff, base, vl); +} + +// CHECK-RV32-LABEL: @test_vle64_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP1]] +// +// CHECK-RV64-LABEL: @test_vle64_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: [[TMP1:%.*]] = call @llvm.riscv.vle.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP1]] +// +vfloat64m8_t test_vle64_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, const double *base, size_t vl) { + return vle64_v_f64m8_m(mask, maskedoff, base, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vse.c @@ -0,0 +1,1706 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S 
-mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vse8_v_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8mf8(int8_t *base, vint8mf8_t value, size_t vl) { + return vse8_v_i8mf8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8mf4(int8_t *base, vint8mf4_t value, size_t vl) { + return vse8_v_i8mf4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8mf2(int8_t *base, vint8mf2_t value, size_t vl) { + return vse8_v_i8mf2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m1(int8_t *base, vint8m1_t value, size_t vl) { + return vse8_v_i8m1(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m2(int8_t *base, vint8m2_t value, size_t vl) { + return vse8_v_i8m2(base, 
value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m4(int8_t *base, vint8m4_t value, size_t vl) { + return vse8_v_i8m4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m8(int8_t *base, vint8m8_t value, size_t vl) { + return vse8_v_i8m8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16mf4(int16_t *base, vint16mf4_t value, size_t vl) { + return vse16_v_i16mf4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16mf2(int16_t *base, vint16mf2_t value, size_t vl) { + return vse16_v_i16mf2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m1(int16_t *base, vint16m1_t value, size_t vl) { + return vse16_v_i16m1(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i16.i32( [[VALUE:%.*]], * 
[[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m2(int16_t *base, vint16m2_t value, size_t vl) { + return vse16_v_i16m2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m4(int16_t *base, vint16m4_t value, size_t vl) { + return vse16_v_i16m4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_i16m8(int16_t *base, vint16m8_t value, size_t vl) { + return vse16_v_i16m8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32mf2(int32_t *base, vint32mf2_t value, size_t vl) { + return vse32_v_i32mf2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m1(int32_t *base, vint32m1_t value, size_t vl) { + return vse32_v_i32m1(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// 
CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m2(int32_t *base, vint32m2_t value, size_t vl) { + return vse32_v_i32m2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m4(int32_t *base, vint32m4_t value, size_t vl) { + return vse32_v_i32m4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_i32m8(int32_t *base, vint32m8_t value, size_t vl) { + return vse32_v_i32m8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m1(int64_t *base, vint64m1_t value, size_t vl) { + return vse64_v_i64m1(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m2(int64_t *base, vint64m2_t value, size_t vl) { + return vse64_v_i64m2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m4(int64_t *base, vint64m4_t value, size_t vl) { + return 
vse64_v_i64m4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_i64m8(int64_t *base, vint64m8_t value, size_t vl) { + return vse64_v_i64m8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8mf8(uint8_t *base, vuint8mf8_t value, size_t vl) { + return vse8_v_u8mf8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8mf4(uint8_t *base, vuint8mf4_t value, size_t vl) { + return vse8_v_u8mf4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8mf2(uint8_t *base, vuint8mf2_t value, size_t vl) { + return vse8_v_u8mf2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m1(uint8_t *base, vuint8m1_t value, size_t vl) { + return vse8_v_u8m1(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i8.i32( [[VALUE:%.*]], * 
[[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m2(uint8_t *base, vuint8m2_t value, size_t vl) { + return vse8_v_u8m2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m4(uint8_t *base, vuint8m4_t value, size_t vl) { + return vse8_v_u8m4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv64i8.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv64i8.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_u8m8(uint8_t *base, vuint8m8_t value, size_t vl) { + return vse8_v_u8m8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16mf4(uint16_t *base, vuint16mf4_t value, size_t vl) { + return vse16_v_u16mf4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16mf2(uint16_t *base, vuint16mf2_t value, size_t vl) { + return vse16_v_u16mf2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void 
@llvm.riscv.vse.nxv4i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m1(uint16_t *base, vuint16m1_t value, size_t vl) { + return vse16_v_u16m1(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m2(uint16_t *base, vuint16m2_t value, size_t vl) { + return vse16_v_u16m2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m4(uint16_t *base, vuint16m4_t value, size_t vl) { + return vse16_v_u16m4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse16_v_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv32i16.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse16_v_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv32i16.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse16_v_u16m8(uint16_t *base, vuint16m8_t value, size_t vl) { + return vse16_v_u16m8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_u32mf2(uint32_t *base, vuint32mf2_t value, size_t vl) { + return vse32_v_u32mf2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_u32m1(uint32_t *base, vuint32m1_t value, size_t vl) { + return 
vse32_v_u32m1(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_u32m2(uint32_t *base, vuint32m2_t value, size_t vl) { + return vse32_v_u32m2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_u32m4(uint32_t *base, vuint32m4_t value, size_t vl) { + return vse32_v_u32m4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16i32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16i32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_u32m8(uint32_t *base, vuint32m8_t value, size_t vl) { + return vse32_v_u32m8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_u64m1(uint64_t *base, vuint64m1_t value, size_t vl) { + return vse64_v_u64m1(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_u64m2(uint64_t *base, vuint64m2_t value, size_t vl) { + return vse64_v_u64m2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void 
@llvm.riscv.vse.nxv4i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_u64m4(uint64_t *base, vuint64m4_t value, size_t vl) { + return vse64_v_u64m4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8i64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8i64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_u64m8(uint64_t *base, vuint64m8_t value, size_t vl) { + return vse64_v_u64m8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_f32mf2(float *base, vfloat32mf2_t value, size_t vl) { + return vse32_v_f32mf2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_f32m1(float *base, vfloat32m1_t value, size_t vl) { + return vse32_v_f32m1(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_f32m2(float *base, vfloat32m2_t value, size_t vl) { + return vse32_v_f32m2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_f32m4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_f32m4(float *base, vfloat32m4_t value, size_t vl) { + return vse32_v_f32m4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse32_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv16f32.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse32_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv16f32.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse32_v_f32m8(float *base, vfloat32m8_t value, size_t vl) { + return vse32_v_f32m8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv1f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv1f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_f64m1(double *base, vfloat64m1_t value, size_t vl) { + return vse64_v_f64m1(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv2f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv2f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_f64m2(double *base, vfloat64m2_t value, size_t vl) { + return vse64_v_f64m2(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv4f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv4f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse64_v_f64m4(double *base, vfloat64m4_t value, size_t vl) { + return vse64_v_f64m4(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse64_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.nxv8f64.i32( [[VALUE:%.*]], * [[TMP0]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse64_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.nxv8f64.i64( [[VALUE:%.*]], * [[TMP0]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret void +// +void test_vse64_v_f64m8(double *base, vfloat64m8_t value, size_t vl) { + return vse64_v_f64m8(base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8mf8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8mf8_m(vbool64_t mask, int8_t *base, vint8mf8_t value, size_t vl) { + return vse8_v_i8mf8_m(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8mf4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8mf4_m(vbool32_t mask, int8_t *base, vint8mf4_t value, size_t vl) { + return vse8_v_i8mf4_m(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8mf2_m(vbool16_t mask, int8_t *base, vint8mf2_t value, size_t vl) { + return vse8_v_i8mf2_m(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret void +// +void test_vse8_v_i8m1_m(vbool8_t mask, int8_t *base, vint8m1_t value, size_t vl) { + return vse8_v_i8m1_m(mask, base, value, vl); +} + +// CHECK-RV32-LABEL: @test_vse8_v_i8m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i32( [[VALUE:%.*]], * [[TMP0]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret void +// +// CHECK-RV64-LABEL: @test_vse8_v_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to * +// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64( 
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m2_m(vbool4_t mask, int8_t *base, vint8m2_t value, size_t vl) {
+  return vse8_v_i8m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m4_m(vbool2_t mask, int8_t *base, vint8m4_t value, size_t vl) {
+  return vse8_v_i8m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_i8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_i8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_i8m8_m(vbool1_t mask, int8_t *base, vint8m8_t value, size_t vl) {
+  return vse8_v_i8m8_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16mf4_m(vbool64_t mask, int16_t *base, vint16mf4_t value, size_t vl) {
+  return vse16_v_i16mf4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16mf2_m(vbool32_t mask, int16_t *base, vint16mf2_t value, size_t vl) {
+  return vse16_v_i16mf2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m1_m(vbool16_t mask, int16_t *base, vint16m1_t value, size_t vl) {
+  return vse16_v_i16m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m2_m(vbool8_t mask, int16_t *base, vint16m2_t value, size_t vl) {
+  return vse16_v_i16m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m4_m(vbool4_t mask, int16_t *base, vint16m4_t value, size_t vl) {
+  return vse16_v_i16m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_i16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_i16m8_m(vbool2_t mask, int16_t *base, vint16m8_t value, size_t vl) {
+  return vse16_v_i16m8_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32mf2_m(vbool64_t mask, int32_t *base, vint32mf2_t value, size_t vl) {
+  return vse32_v_i32mf2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m1_m(vbool32_t mask, int32_t *base, vint32m1_t value, size_t vl) {
+  return vse32_v_i32m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m2_m(vbool16_t mask, int32_t *base, vint32m2_t value, size_t vl) {
+  return vse32_v_i32m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m4_m(vbool8_t mask, int32_t *base, vint32m4_t value, size_t vl) {
+  return vse32_v_i32m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_i32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_i32m8_m(vbool4_t mask, int32_t *base, vint32m8_t value, size_t vl) {
+  return vse32_v_i32m8_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m1_m(vbool64_t mask, int64_t *base, vint64m1_t value, size_t vl) {
+  return vse64_v_i64m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m2_m(vbool32_t mask, int64_t *base, vint64m2_t value, size_t vl) {
+  return vse64_v_i64m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m4_m(vbool16_t mask, int64_t *base, vint64m4_t value, size_t vl) {
+  return vse64_v_i64m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_i64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_i64m8_m(vbool8_t mask, int64_t *base, vint64m8_t value, size_t vl) {
+  return vse64_v_i64m8_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8mf8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i32(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8mf8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 1 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i8.i64(<vscale x 1 x i8> [[VALUE:%.*]], <vscale x 1 x i8>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8mf8_m(vbool64_t mask, uint8_t *base, vuint8mf8_t value, size_t vl) {
+  return vse8_v_u8mf8_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i32(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 2 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i8.i64(<vscale x 2 x i8> [[VALUE:%.*]], <vscale x 2 x i8>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8mf4_m(vbool32_t mask, uint8_t *base, vuint8mf4_t value, size_t vl) {
+  return vse8_v_u8mf4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i32(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 4 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i8.i64(<vscale x 4 x i8> [[VALUE:%.*]], <vscale x 4 x i8>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8mf2_m(vbool16_t mask, uint8_t *base, vuint8mf2_t value, size_t vl) {
+  return vse8_v_u8mf2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i32(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 8 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i8.i64(<vscale x 8 x i8> [[VALUE:%.*]], <vscale x 8 x i8>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m1_m(vbool8_t mask, uint8_t *base, vuint8m1_t value, size_t vl) {
+  return vse8_v_u8m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i32(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 16 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i8.i64(<vscale x 16 x i8> [[VALUE:%.*]], <vscale x 16 x i8>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m2_m(vbool4_t mask, uint8_t *base, vuint8m2_t value, size_t vl) {
+  return vse8_v_u8m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i32(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 32 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i8.i64(<vscale x 32 x i8> [[VALUE:%.*]], <vscale x 32 x i8>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m4_m(vbool2_t mask, uint8_t *base, vuint8m4_t value, size_t vl) {
+  return vse8_v_u8m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse8_v_u8m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i32(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse8_v_u8m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i8* [[BASE:%.*]] to <vscale x 64 x i8>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv64i8.i64(<vscale x 64 x i8> [[VALUE:%.*]], <vscale x 64 x i8>* [[TMP0]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse8_v_u8m8_m(vbool1_t mask, uint8_t *base, vuint8m8_t value, size_t vl) {
+  return vse8_v_u8m8_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16mf4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i32(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 1 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i16.i64(<vscale x 1 x i16> [[VALUE:%.*]], <vscale x 1 x i16>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16mf4_m(vbool64_t mask, uint16_t *base, vuint16mf4_t value, size_t vl) {
+  return vse16_v_u16mf4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i32(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 2 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i16.i64(<vscale x 2 x i16> [[VALUE:%.*]], <vscale x 2 x i16>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16mf2_m(vbool32_t mask, uint16_t *base, vuint16mf2_t value, size_t vl) {
+  return vse16_v_u16mf2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i32(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 4 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i16.i64(<vscale x 4 x i16> [[VALUE:%.*]], <vscale x 4 x i16>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m1_m(vbool16_t mask, uint16_t *base, vuint16m1_t value, size_t vl) {
+  return vse16_v_u16m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i32(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 8 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i16.i64(<vscale x 8 x i16> [[VALUE:%.*]], <vscale x 8 x i16>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m2_m(vbool8_t mask, uint16_t *base, vuint16m2_t value, size_t vl) {
+  return vse16_v_u16m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i32(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 16 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i16.i64(<vscale x 16 x i16> [[VALUE:%.*]], <vscale x 16 x i16>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m4_m(vbool4_t mask, uint16_t *base, vuint16m4_t value, size_t vl) {
+  return vse16_v_u16m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse16_v_u16m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i32(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse16_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i16* [[BASE:%.*]] to <vscale x 32 x i16>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv32i16.i64(<vscale x 32 x i16> [[VALUE:%.*]], <vscale x 32 x i16>* [[TMP0]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse16_v_u16m8_m(vbool2_t mask, uint16_t *base, vuint16m8_t value, size_t vl) {
+  return vse16_v_u16m8_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i32(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 1 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i32.i64(<vscale x 1 x i32> [[VALUE:%.*]], <vscale x 1 x i32>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32mf2_m(vbool64_t mask, uint32_t *base, vuint32mf2_t value, size_t vl) {
+  return vse32_v_u32mf2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i32(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 2 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> [[VALUE:%.*]], <vscale x 2 x i32>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m1_m(vbool32_t mask, uint32_t *base, vuint32m1_t value, size_t vl) {
+  return vse32_v_u32m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i32(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 4 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i32.i64(<vscale x 4 x i32> [[VALUE:%.*]], <vscale x 4 x i32>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m2_m(vbool16_t mask, uint32_t *base, vuint32m2_t value, size_t vl) {
+  return vse32_v_u32m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i32(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 8 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i32.i64(<vscale x 8 x i32> [[VALUE:%.*]], <vscale x 8 x i32>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m4_m(vbool8_t mask, uint32_t *base, vuint32m4_t value, size_t vl) {
+  return vse32_v_u32m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_u32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i32(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i32* [[BASE:%.*]] to <vscale x 16 x i32>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16i32.i64(<vscale x 16 x i32> [[VALUE:%.*]], <vscale x 16 x i32>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_u32m8_m(vbool4_t mask, uint32_t *base, vuint32m8_t value, size_t vl) {
+  return vse32_v_u32m8_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i32(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 1 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1i64.i64(<vscale x 1 x i64> [[VALUE:%.*]], <vscale x 1 x i64>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m1_m(vbool64_t mask, uint64_t *base, vuint64m1_t value, size_t vl) {
+  return vse64_v_u64m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i32(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 2 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2i64.i64(<vscale x 2 x i64> [[VALUE:%.*]], <vscale x 2 x i64>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m2_m(vbool32_t mask, uint64_t *base, vuint64m2_t value, size_t vl) {
+  return vse64_v_u64m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i32(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 4 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4i64.i64(<vscale x 4 x i64> [[VALUE:%.*]], <vscale x 4 x i64>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m4_m(vbool16_t mask, uint64_t *base, vuint64m4_t value, size_t vl) {
+  return vse64_v_u64m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_u64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i32(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast i64* [[BASE:%.*]] to <vscale x 8 x i64>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8i64.i64(<vscale x 8 x i64> [[VALUE:%.*]], <vscale x 8 x i64>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_u64m8_m(vbool8_t mask, uint64_t *base, vuint64m8_t value, size_t vl) {
+  return vse64_v_u64m8_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32mf2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1f32.i32(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 1 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f32.i64(<vscale x 1 x float> [[VALUE:%.*]], <vscale x 1 x float>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32mf2_m(vbool64_t mask, float *base, vfloat32mf2_t value, size_t vl) {
+  return vse32_v_f32mf2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2f32.i32(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 2 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f32.i64(<vscale x 2 x float> [[VALUE:%.*]], <vscale x 2 x float>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m1_m(vbool32_t mask, float *base, vfloat32m1_t value, size_t vl) {
+  return vse32_v_f32m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4f32.i32(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 4 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f32.i64(<vscale x 4 x float> [[VALUE:%.*]], <vscale x 4 x float>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m2_m(vbool16_t mask, float *base, vfloat32m2_t value, size_t vl) {
+  return vse32_v_f32m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8f32.i32(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 8 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f32.i64(<vscale x 8 x float> [[VALUE:%.*]], <vscale x 8 x float>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m4_m(vbool8_t mask, float *base, vfloat32m4_t value, size_t vl) {
+  return vse32_v_f32m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse32_v_f32m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv16f32.i32(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse32_v_f32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast float* [[BASE:%.*]] to <vscale x 16 x float>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv16f32.i64(<vscale x 16 x float> [[VALUE:%.*]], <vscale x 16 x float>* [[TMP0]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse32_v_f32m8_m(vbool4_t mask, float *base, vfloat32m8_t value, size_t vl) {
+  return vse32_v_f32m8_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m1_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv1f64.i32(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 1 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv1f64.i64(<vscale x 1 x double> [[VALUE:%.*]], <vscale x 1 x double>* [[TMP0]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m1_m(vbool64_t mask, double *base, vfloat64m1_t value, size_t vl) {
+  return vse64_v_f64m1_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m2_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv2f64.i32(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 2 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv2f64.i64(<vscale x 2 x double> [[VALUE:%.*]], <vscale x 2 x double>* [[TMP0]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m2_m(vbool32_t mask, double *base, vfloat64m2_t value, size_t vl) {
+  return vse64_v_f64m2_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m4_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv4f64.i32(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 4 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv4f64.i64(<vscale x 4 x double> [[VALUE:%.*]], <vscale x 4 x double>* [[TMP0]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m4_m(vbool16_t mask, double *base, vfloat64m4_t value, size_t vl) {
+  return vse64_v_f64m4_m(mask, base, value, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vse64_v_f64m8_m(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV32-NEXT: call void @llvm.riscv.vse.mask.nxv8f64.i32(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT: ret void
+//
+// CHECK-RV64-LABEL: @test_vse64_v_f64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast double* [[BASE:%.*]] to <vscale x 8 x double>*
+// CHECK-RV64-NEXT: call void @llvm.riscv.vse.mask.nxv8f64.i64(<vscale x 8 x double> [[VALUE:%.*]], <vscale x 8 x double>* [[TMP0]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vse64_v_f64m8_m(vbool8_t mask, double *base, vfloat64m8_t value, size_t vl) {
+  return vse64_v_f64m8_m(mask, base, value, vl);
+}
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -15,6 +15,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringMap.h"
 #include "llvm/ADT/StringSet.h"
@@ -162,7 +163,8 @@
                StringRef IRName, bool HasSideEffects, bool IsMask,
                bool HasMaskedOffOperand, bool HasVL, bool HasGeneric,
                bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
-               const std::vector<int64_t> &IntrinsicTypes);
+               const std::vector<int64_t> &IntrinsicTypes,
+               const std::vector<int64_t> &PermuteOperands);
   ~RVVIntrinsic() = default;
 
   StringRef getName() const { return Name; }
@@ -644,7 +646,8 @@
                            bool HasMaskedOffOperand, bool HasVL,
                            bool HasGeneric, bool HasAutoDef,
                            StringRef ManualCodegen, const RVVTypes &OutInTypes,
-                           const std::vector<int64_t> &NewIntrinsicTypes)
+                           const std::vector<int64_t> &NewIntrinsicTypes,
+                           const std::vector<int64_t> &PermuteOperands)
     : IRName(IRName), HasSideEffects(HasSideEffects),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
       HasGeneric(HasGeneric), HasAutoDef(HasAutoDef),
@@ -677,6 +680,29 @@
   InputTypes.assign(OutInTypes.begin() + 1, OutInTypes.end());
   CTypeOrder.resize(InputTypes.size());
   std::iota(CTypeOrder.begin(), CTypeOrder.end(), 0);
+  // Update the default order when the operands need to be permuted.
+  if (!PermuteOperands.empty()) {
+    // PermuteOperands is given in terms of the unmasked operand indices, so
+    // shift the indices when there is a masked-off operand, which is always
+    // the first operand.
+    unsigned Skew = HasMaskedOffOperand ? 1 : 0;
+    for (unsigned i = 0; i < PermuteOperands.size(); ++i) {
+      if (i != PermuteOperands[i])
+        CTypeOrder[i] = PermuteOperands[i] + Skew;
+    }
+    // Verify that the resulting CTypeOrder contains only legal values.
+    if (*std::max_element(CTypeOrder.begin(), CTypeOrder.end()) >=
+        CTypeOrder.size())
+      PrintFatalError(
+          "The index in PermuteOperands is bigger than the operand number");
+    SmallSet<unsigned, 8> Seen;
+    for (auto Idx : CTypeOrder) {
+      if (!Seen.insert(Idx).second)
+        PrintFatalError("Elements of PermuteOperands must be distinct");
+    }
+  }
+
   if (IsMask) {
     if (HasVL)
       // Builtin type order: op0, op1, ..., mask, vl
@@ -945,6 +971,8 @@
   StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask");
   std::vector<int64_t> IntrinsicTypes =
      R->getValueAsListOfInts("IntrinsicTypes");
+  std::vector<int64_t> PermuteOperands =
+     R->getValueAsListOfInts("PermuteOperands");
   StringRef IRName = R->getValueAsString("IRName");
   StringRef IRNameMask = R->getValueAsString("IRNameMask");
 
@@ -993,7 +1021,8 @@
       Out.push_back(std::make_unique<RVVIntrinsic>(
           Name, SuffixStr, MangledName, IRName, HasSideEffects,
          /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasGeneric,
-          HasAutoDef, ManualCodegen, Types.getValue(), IntrinsicTypes));
+          HasAutoDef, ManualCodegen, Types.getValue(), IntrinsicTypes,
+          PermuteOperands));
       if (HasMask) {
         // Create a mask intrinsic
         Optional<RVVTypes> MaskTypes =
@@ -1002,7 +1031,7 @@
             Name, SuffixStr, MangledName, IRNameMask, HasSideEffects,
            /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasGeneric,
             HasAutoDef, ManualCodegenMask, MaskTypes.getValue(),
-            IntrinsicTypes));
+            IntrinsicTypes, PermuteOperands));
       }
     } // end for Log2LMULList
   } // end for TypeRange
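
Note for reviewers: below is a small standalone sketch (not part of the patch) of the index remapping that the PermuteOperands hunk above performs. The helper name computeOrder is hypothetical; it assumes the semantics described in the TableGen comment, namely that entry i of PermuteOperands names the unmasked-builtin operand that supplies C/C++ parameter i, with the indices skewed by one when a masked-off operand is prepended.

#include <cassert>
#include <cstdint>
#include <numeric>
#include <vector>

// Hypothetical, self-contained model of the CTypeOrder computation in the
// RVVIntrinsic constructor: start from the identity mapping, then rewrite
// only the entries that PermuteOperands moves.
std::vector<unsigned> computeOrder(unsigned NumOperands,
                                   const std::vector<int64_t> &PermuteOperands,
                                   bool HasMaskedOffOperand) {
  std::vector<unsigned> CTypeOrder(NumOperands);
  std::iota(CTypeOrder.begin(), CTypeOrder.end(), 0); // identity by default
  unsigned Skew = HasMaskedOffOperand ? 1 : 0;        // maskedoff is operand 0
  for (unsigned i = 0; i < PermuteOperands.size(); ++i)
    if (static_cast<unsigned>(PermuteOperands[i]) != i)
      CTypeOrder[i] = PermuteOperands[i] + Skew;
  return CTypeOrder;
}

int main() {
  // vse: the C/C++ API is (ptr, value, vl) but the builtin is (value, ptr, vl),
  // so PermuteOperands = [1, 0] swaps the first two parameter types while the
  // VL operand keeps its default position.
  auto Order = computeOrder(3, {1, 0}, /*HasMaskedOffOperand=*/false);
  assert(Order[0] == 1 && Order[1] == 0 && Order[2] == 2);
  return 0;
}

Under these assumptions the identity entries are left untouched, which is why the in-tree loop only rewrites CTypeOrder[i] when i != PermuteOperands[i].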