diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -131,7 +131,7 @@
 // There are a number of attributes that are used to constraint the number and
 // shape of the builtins generated. Refer to the comments below for them.
-class RVVBuiltin<string suffix, string prototype, string type_range> {
+class RVVBuiltin<string suffix, string prototype, string type_range,
+                 string mangled_suffix = ""> {
   // Base name that will be prepended in __builtin_rvv_ and appended the
   // computed Suffix.
   string Name = NAME;
@@ -145,6 +145,10 @@
   // It's used for describe some special naming cases.
   string MangledName = "";
 
+  // If not empty, each MangledName will have this appended after an
+  // underscore (_). It is instantiated like Prototype.
+  string MangledSuffix = mangled_suffix;
+
   // The different variants of the builtin, parameterised with a type.
   string TypeRange = type_range;
 
@@ -1711,29 +1715,30 @@
 }
 
 // Miscellaneous
-let HasMask = false, HasVL = false, HasNoMaskedOverloaded = false,
-    IRName = "" in {
+let HasMask = false, HasVL = false, IRName = "" in {
   let Name = "vreinterpret_v",
       ManualCodegen = [{
         return Builder.CreateBitCast(Ops[0], ResultType);
       }] in {
     // Reinterpret between different type under the same SEW and LMUL
-    def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil">;
-    def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il">;
-    def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil">;
-    def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il">;
-    def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il">;
-    def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il">;
+    def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil", "v">;
+    def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il", "v">;
+    def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil", "Uv">;
+    def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il", "Uv">;
+    def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il", "Fv">;
+    def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il", "Fv">;
 
     // Reinterpret between different SEW under the same LMUL
     foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)",
                        "(FixedSEW:32)", "(FixedSEW:64)"] in {
-      def vreinterpret_i_ # dst_sew : RVVBuiltin<"v" # dst_sew # "v", dst_sew # "vv", "csil">;
-      def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv", dst_sew # "UvUv", "csil">;
+      def vreinterpret_i_ # dst_sew : RVVBuiltin<"v" # dst_sew # "v",
+                                                 dst_sew # "vv", "csil", dst_sew # "v">;
+      def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv",
+                                                 dst_sew # "UvUv", "csil", dst_sew # "Uv">;
     }
   }
 
-  let Name = "vundefined",
+  let Name = "vundefined", HasNoMaskedOverloaded = false,
       ManualCodegen = [{
         return llvm::UndefValue::get(ResultType);
       }] in {
@@ -1743,7 +1748,7 @@
 
   // LMUL truncation
   // C/C++ Operand: VecTy, IR Operand: VecTy, Index
-  let Name = "vlmul_trunc_v",
+  let Name = "vlmul_trunc_v", MangledName = "vlmul_trunc",
       ManualCodegen = [{ {
         ID = Intrinsic::experimental_vector_extract;
         IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -1752,14 +1757,16 @@
       }
   }] in {
     foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)",
                         "(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
-      def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilfd">;
-      def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUv", "csil">;
+      def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v",
+                                              dst_lmul # "vv", "csilfd", dst_lmul # "v">;
+      def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv",
+                                                dst_lmul # "UvUv", "csil", dst_lmul # "Uv">;
     }
   }
 
   // LMUL extension
   // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
-  let Name = "vlmul_ext_v",
+  let Name = "vlmul_ext_v", MangledName = "vlmul_ext",
       ManualCodegen = [{
         ID = Intrinsic::experimental_vector_insert;
         IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -1770,8 +1777,10 @@
   }] in {
     foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)",
                         "(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
-      def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilfd">;
-      def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUv", "csil">;
+      def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v",
+                                            dst_lmul # "vv", "csilfd", dst_lmul # "v">;
+      def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv",
+                                              dst_lmul # "UvUv", "csil", dst_lmul # "Uv">;
     }
   }
 
@@ -1788,8 +1797,8 @@
       }
   }] in {
     foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
-      def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilfd">;
-      def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUvKz", "csil">;
+      def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilfd", dst_lmul # "v">;
+      def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUvKz", "csil", dst_lmul # "Uv">;
     }
   }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c
@@ -0,0 +1,547 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
+  return vget_i8m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
+  return vget_i8m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
+  return vget_i8m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
+  return vget_i8m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
+  return
vget_i8m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) { + return vget_i8m4(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) { + return vget_u8m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) { + return vget_u8m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) { + return vget_u8m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) { + return vget_u8m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) { + return vget_u8m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) { + return vget_u8m4(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) { + return vget_i16m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) { + return vget_i16m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) { + return vget_i16m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t 
index) { + return vget_i16m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) { + return vget_i16m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) { + return vget_i16m4(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) { + return vget_u16m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) { + return vget_u16m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) { + return vget_u16m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) { + return vget_u16m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) { + return vget_u16m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) { + return vget_u16m4(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) { + return vget_i32m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) { + return vget_i32m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) { + return vget_i32m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) { + return vget_i32m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) { + return vget_i32m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) { + return vget_i32m4(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) { + return vget_u32m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) { + return vget_u32m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) { + return vget_u32m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) { + return vget_u32m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) { + return vget_u32m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) { + return vget_u32m4(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) { + return vget_f32m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) { + return vget_f32m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) { + return vget_f32m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) { + return vget_f32m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) { + return vget_f32m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) { + return vget_f32m4(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) { + return vget_i64m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) { + return vget_i64m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) { + return vget_i64m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) { + return vget_i64m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) { + return vget_i64m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) { + return vget_i64m4(src, 0); +} + +// CHECK-RV64-LABEL: 
@test_vget_v_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) { + return vget_u64m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) { + return vget_u64m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) { + return vget_u64m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) { + return vget_u64m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) { + return vget_u64m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) { + return vget_u64m4(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) { + return vget_f64m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) { + return vget_f64m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) { + return vget_f64m1(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) { + return vget_f64m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t 
test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) { + return vget_f64m2(src, 0); +} + +// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[SRC:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) { + return vget_f64m4(src, 0); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c @@ -0,0 +1,2166 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { + return vlmul_ext_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { + return vlmul_ext_i8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { + return vlmul_ext_i8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { + return vlmul_ext_i8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { + return vlmul_ext_i8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { + return vlmul_ext_i8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { + return vlmul_ext_i8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { + return vlmul_ext_i8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { + return vlmul_ext_i8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { + return vlmul_ext_i8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { + return vlmul_ext_i8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { + return vlmul_ext_i8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { + return vlmul_ext_i8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { + return vlmul_ext_i8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { + return vlmul_ext_i8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { + return vlmul_ext_i8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { + return vlmul_ext_i8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { + return vlmul_ext_i8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { + return vlmul_ext_i8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { + return vlmul_ext_i8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { + return vlmul_ext_i8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { + return vlmul_ext_i16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { + return vlmul_ext_i16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { + return vlmul_ext_i16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { + return vlmul_ext_i16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { + return vlmul_ext_i16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { + return vlmul_ext_i16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { + return vlmul_ext_i16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { + return vlmul_ext_i16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { + return vlmul_ext_i16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { + return vlmul_ext_i16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { + return vlmul_ext_i16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { + return vlmul_ext_i16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { + return vlmul_ext_i16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { + return vlmul_ext_i16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { + return vlmul_ext_i16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { + return vlmul_ext_i32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { + return vlmul_ext_i32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { + return vlmul_ext_i32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { + 
return vlmul_ext_i32m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { + return vlmul_ext_i32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { + return vlmul_ext_i32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { + return vlmul_ext_i32m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { + return vlmul_ext_i32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { + return vlmul_ext_i32m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { + return vlmul_ext_i32m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { + return vlmul_ext_i64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { + return vlmul_ext_i64m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { + return vlmul_ext_i64m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { + return vlmul_ext_i64m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 
0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { + return vlmul_ext_i64m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) { + return vlmul_ext_i64m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) { + return vlmul_ext_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) { + return vlmul_ext_u8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) { + return vlmul_ext_u8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) { + return vlmul_ext_u8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) { + return vlmul_ext_u8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) { + return vlmul_ext_u8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) { + return vlmul_ext_u8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) { + return vlmul_ext_u8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) { + return vlmul_ext_u8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) { + return vlmul_ext_u8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) { + return vlmul_ext_u8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) { + return vlmul_ext_u8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) { + return vlmul_ext_u8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) { + return vlmul_ext_u8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) { + return vlmul_ext_u8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) { + return vlmul_ext_u8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) { + return vlmul_ext_u8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) { + return vlmul_ext_u8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) { + return vlmul_ext_u8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) { + return vlmul_ext_u8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) { + return vlmul_ext_u8m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) { + return vlmul_ext_u16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) { + return vlmul_ext_u16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) { + return vlmul_ext_u16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) { + return vlmul_ext_u16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) { + return vlmul_ext_u16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) { + return vlmul_ext_u16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) { + return vlmul_ext_u16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) { + return vlmul_ext_u16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) { + return vlmul_ext_u16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) { + return vlmul_ext_u16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) { + return vlmul_ext_u16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) { + return vlmul_ext_u16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) { + return vlmul_ext_u16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) { + return vlmul_ext_u16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) { + return vlmul_ext_u16m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) { + return vlmul_ext_u32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) { + return vlmul_ext_u32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) { + return vlmul_ext_u32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) { + return vlmul_ext_u32m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) { + return vlmul_ext_u32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) { + return vlmul_ext_u32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) { + return vlmul_ext_u32m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) { + return vlmul_ext_u32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) { + return vlmul_ext_u32m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) { + return vlmul_ext_u32m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) { + return vlmul_ext_u64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) { + return vlmul_ext_u64m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) { + return vlmul_ext_u64m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) { + return vlmul_ext_u64m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) { + return vlmul_ext_u64m8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) 
+vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
+  return vlmul_ext_u64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
+  return vlmul_ext_f32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
+  return vlmul_ext_f32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
+  return vlmul_ext_f32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
+  return vlmul_ext_f32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
+  return vlmul_ext_f32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
+  return vlmul_ext_f32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
+  return vlmul_ext_f32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
+  return vlmul_ext_f32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
+  return vlmul_ext_f32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
+  return vlmul_ext_f32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
+  return vlmul_ext_f64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
+  return vlmul_ext_f64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
+  return vlmul_ext_f64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
+  return vlmul_ext_f64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
+  return vlmul_ext_f64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
+  return vlmul_ext_f64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
+  return vlmul_trunc_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
+  return vlmul_trunc_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
+  return vlmul_trunc_i8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
+  return vlmul_trunc_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
+  return vlmul_trunc_i8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
+  return vlmul_trunc_i8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
+  return vlmul_trunc_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
+  return vlmul_trunc_i8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
+  return vlmul_trunc_i8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
+  return vlmul_trunc_i8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
+  return vlmul_trunc_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
+  return vlmul_trunc_i8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
+  return vlmul_trunc_i8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
+  return vlmul_trunc_i8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
+  return vlmul_trunc_i8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
+  return vlmul_trunc_i16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
+  return vlmul_trunc_i16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
+  return vlmul_trunc_i16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
+  return vlmul_trunc_i16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
+  return vlmul_trunc_i16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
+  return vlmul_trunc_i16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
+  return vlmul_trunc_i16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
+  return vlmul_trunc_i16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
+  return vlmul_trunc_i16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
+  return vlmul_trunc_i16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
+  return vlmul_trunc_i16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
+  return vlmul_trunc_i16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
+  return vlmul_trunc_i16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
+  return vlmul_trunc_i16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
+  return vlmul_trunc_i16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
+  return vlmul_trunc_i32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
+  return vlmul_trunc_i32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
+  return vlmul_trunc_i32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
+  return vlmul_trunc_i32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
+  return vlmul_trunc_i32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
+  return vlmul_trunc_i32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
+  return vlmul_trunc_i32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
+  return vlmul_trunc_i32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
+  return vlmul_trunc_i32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
+  return vlmul_trunc_i32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
+  return vlmul_trunc_i64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
+  return vlmul_trunc_i64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
+  return vlmul_trunc_i64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
+  return vlmul_trunc_i64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
+  return vlmul_trunc_i64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
+  return vlmul_trunc_i64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
+  return vlmul_trunc_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
+  return vlmul_trunc_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
+  return vlmul_trunc_u8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
+  return vlmul_trunc_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
+  return vlmul_trunc_u8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
+  return vlmul_trunc_u8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
+  return vlmul_trunc_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
+  return vlmul_trunc_u8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
+  return vlmul_trunc_u8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
+  return vlmul_trunc_u8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
+  return vlmul_trunc_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
+  return vlmul_trunc_u8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
+  return vlmul_trunc_u8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
+  return vlmul_trunc_u8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
+  return vlmul_trunc_u8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
+  return vlmul_trunc_u16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
+  return vlmul_trunc_u16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
+  return vlmul_trunc_u16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
+  return vlmul_trunc_u16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
+  return vlmul_trunc_u16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
+  return vlmul_trunc_u16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
+  return vlmul_trunc_u16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
+  return vlmul_trunc_u16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
+  return vlmul_trunc_u16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
+  return vlmul_trunc_u16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
+  return vlmul_trunc_u16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
+  return vlmul_trunc_u16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
+  return vlmul_trunc_u16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
+  return vlmul_trunc_u16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
+  return vlmul_trunc_u16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) {
+  return vlmul_trunc_u32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) {
+  return vlmul_trunc_u32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) {
+  return vlmul_trunc_u32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) {
+  return vlmul_trunc_u32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) {
+  return vlmul_trunc_u32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) {
+  return vlmul_trunc_u32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) {
+  return vlmul_trunc_u32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) {
+  return vlmul_trunc_u32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) {
+  return vlmul_trunc_u32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) {
+  return vlmul_trunc_u32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) {
+  return vlmul_trunc_u64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) {
+  return vlmul_trunc_u64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) {
+  return vlmul_trunc_u64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) {
+  return vlmul_trunc_u64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) {
+  return vlmul_trunc_u64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) {
+  return vlmul_trunc_u64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) {
+  return vlmul_trunc_f32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) {
+  return vlmul_trunc_f32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) {
+  return vlmul_trunc_f32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) {
+  return vlmul_trunc_f32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) {
+  return vlmul_trunc_f32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) {
+  return vlmul_trunc_f32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) {
+  return vlmul_trunc_f32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) {
+  return vlmul_trunc_f32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) {
+  return vlmul_trunc_f32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) {
+  return vlmul_trunc_f32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) {
+  return vlmul_trunc_f64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) {
+  return vlmul_trunc_f64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) {
+  return vlmul_trunc_f64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) {
+  return vlmul_trunc_f64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) {
+  return vlmul_trunc_f64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) {
+  return vlmul_trunc_f64m4(op1);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c
@@ -0,0 +1,1690 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf8_u8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint8mf8_t test_vreinterpret_v_i8mf8_u8mf8(vint8mf8_t src) {
+  return vreinterpret_u8mf8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_u8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint8mf4_t test_vreinterpret_v_i8mf4_u8mf4(vint8mf4_t src) {
+  return vreinterpret_u8mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_u8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint8mf2_t test_vreinterpret_v_i8mf2_u8mf2(vint8mf2_t src) {
+  return vreinterpret_u8mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_u8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint8m1_t test_vreinterpret_v_i8m1_u8m1(vint8m1_t src) {
+  return vreinterpret_u8m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_u8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint8m2_t test_vreinterpret_v_i8m2_u8m2(vint8m2_t src) {
+  return vreinterpret_u8m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_u8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint8m4_t test_vreinterpret_v_i8m4_u8m4(vint8m4_t src) {
+  return vreinterpret_u8m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_u8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint8m8_t test_vreinterpret_v_i8m8_u8m8(vint8m8_t src) {
+  return vreinterpret_u8m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf8_i8mf8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint8mf8_t test_vreinterpret_v_u8mf8_i8mf8(vuint8mf8_t src) {
+  return vreinterpret_i8mf8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_i8mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint8mf4_t test_vreinterpret_v_u8mf4_i8mf4(vuint8mf4_t src) {
+  return vreinterpret_i8mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_i8mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint8mf2_t test_vreinterpret_v_u8mf2_i8mf2(vuint8mf2_t src) {
+  return vreinterpret_i8mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_i8m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint8m1_t test_vreinterpret_v_u8m1_i8m1(vuint8m1_t src) {
+  return vreinterpret_i8m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_i8m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint8m2_t test_vreinterpret_v_u8m2_i8m2(vuint8m2_t src) {
+  return vreinterpret_i8m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_i8m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint8m4_t test_vreinterpret_v_u8m4_i8m4(vuint8m4_t src) {
+  return vreinterpret_i8m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_i8m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint8m8_t test_vreinterpret_v_u8m8_i8m8(vuint8m8_t src) {
+  return vreinterpret_i8m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint16mf4_t test_vreinterpret_v_i16mf4_u16mf4(vint16mf4_t src) {
+  return vreinterpret_u16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint16mf2_t test_vreinterpret_v_i16mf2_u16mf2(vint16mf2_t src) {
+  return vreinterpret_u16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint16m1_t test_vreinterpret_v_i16m1_u16m1(vint16m1_t src) {
+  return vreinterpret_u16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint16m2_t test_vreinterpret_v_i16m2_u16m2(vint16m2_t src) {
+  return vreinterpret_u16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint16m4_t test_vreinterpret_v_i16m4_u16m4(vint16m4_t src) {
+  return vreinterpret_u16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint16m8_t test_vreinterpret_v_i16m8_u16m8(vint16m8_t src) {
+  return vreinterpret_u16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint16mf4_t test_vreinterpret_v_u16mf4_i16mf4(vuint16mf4_t src) {
+  return vreinterpret_i16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint16mf2_t test_vreinterpret_v_u16mf2_i16mf2(vuint16mf2_t src) {
+  return vreinterpret_i16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint16m1_t test_vreinterpret_v_u16m1_i16m1(vuint16m1_t src) {
+  return vreinterpret_i16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint16m2_t test_vreinterpret_v_u16m2_i16m2(vuint16m2_t src) {
+  return vreinterpret_i16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint16m4_t test_vreinterpret_v_u16m4_i16m4(vuint16m4_t src) {
+  return vreinterpret_i16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint16m8_t test_vreinterpret_v_u16m8_i16m8(vuint16m8_t src) {
+  return vreinterpret_i16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint32mf2_t test_vreinterpret_v_i32mf2_u32mf2(vint32mf2_t src) {
+  return vreinterpret_u32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint32m1_t test_vreinterpret_v_i32m1_u32m1(vint32m1_t src) {
+  return vreinterpret_u32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint32m2_t test_vreinterpret_v_i32m2_u32m2(vint32m2_t src) {
+  return vreinterpret_u32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint32m4_t test_vreinterpret_v_i32m4_u32m4(vint32m4_t src) {
+  return vreinterpret_u32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint32m8_t test_vreinterpret_v_i32m8_u32m8(vint32m8_t src) {
+  return vreinterpret_u32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint32mf2_t test_vreinterpret_v_u32mf2_i32mf2(vuint32mf2_t src) {
+  return vreinterpret_i32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint32m1_t test_vreinterpret_v_u32m1_i32m1(vuint32m1_t src) {
+  return vreinterpret_i32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint32m2_t test_vreinterpret_v_u32m2_i32m2(vuint32m2_t src) {
+  return vreinterpret_i32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint32m4_t test_vreinterpret_v_u32m4_i32m4(vuint32m4_t src) {
+  return vreinterpret_i32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vint32m8_t test_vreinterpret_v_u32m8_i32m8(vuint32m8_t src) {
+  return vreinterpret_i32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vreinterpret_v_f32mf2_i32mf2(vfloat32mf2_t src) {
+  return vreinterpret_i32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vreinterpret_v_f32m1_i32m1(vfloat32m1_t src) {
+  return vreinterpret_i32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vreinterpret_v_f32m2_i32m2(vfloat32m2_t src) {
+  return vreinterpret_i32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vreinterpret_v_f32m4_i32m4(vfloat32m4_t src) {
+  return vreinterpret_i32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vreinterpret_v_f32m8_i32m8(vfloat32m8_t src) {
+  return vreinterpret_i32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vreinterpret_v_f32mf2_u32mf2(vfloat32mf2_t src) {
+  return vreinterpret_u32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vreinterpret_v_f32m1_u32m1(vfloat32m1_t src) {
+  return vreinterpret_u32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vreinterpret_v_f32m2_u32m2(vfloat32m2_t src) {
+  return vreinterpret_u32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vreinterpret_v_f32m4_u32m4(vfloat32m4_t src) {
+  return vreinterpret_u32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vreinterpret_v_f32m8_u32m8(vfloat32m8_t src) {
+  return vreinterpret_u32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vreinterpret_v_i32mf2_f32mf2(vint32mf2_t src) {
+  return vreinterpret_f32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vreinterpret_v_i32m1_f32m1(vint32m1_t src) {
+  return vreinterpret_f32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vreinterpret_v_i32m2_f32m2(vint32m2_t src) {
+  return vreinterpret_f32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vreinterpret_v_i32m4_f32m4(vint32m4_t src) {
+  return vreinterpret_f32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vreinterpret_v_i32m8_f32m8(vint32m8_t src) {
+  return vreinterpret_f32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_f32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vreinterpret_v_u32mf2_f32mf2(vuint32mf2_t src) {
+  return vreinterpret_f32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_f32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vreinterpret_v_u32m1_f32m1(vuint32m1_t src) {
+  return vreinterpret_f32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_f32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vreinterpret_v_u32m2_f32m2(vuint32m2_t src) {
+  return vreinterpret_f32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_f32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vreinterpret_v_u32m4_f32m4(vuint32m4_t src) {
+  return vreinterpret_f32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_f32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vreinterpret_v_u32m8_f32m8(vuint32m8_t src) {
+  return vreinterpret_f32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: ret [[SRC:%.*]]
+//
+vuint64m1_t test_vreinterpret_v_i64m1_u64m1(vint64m1_t src) {
+  return vreinterpret_u64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_u64m2(
+// CHECK-RV64-NEXT: entry:
CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint64m2_t test_vreinterpret_v_i64m2_u64m2(vint64m2_t src) { + return vreinterpret_u64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint64m4_t test_vreinterpret_v_i64m4_u64m4(vint64m4_t src) { + return vreinterpret_u64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint64m8_t test_vreinterpret_v_i64m8_u64m8(vint64m8_t src) { + return vreinterpret_u64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint64m1_t test_vreinterpret_v_u64m1_i64m1(vuint64m1_t src) { + return vreinterpret_i64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint64m2_t test_vreinterpret_v_u64m2_i64m2(vuint64m2_t src) { + return vreinterpret_i64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint64m4_t test_vreinterpret_v_u64m4_i64m4(vuint64m4_t src) { + return vreinterpret_i64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint64m8_t test_vreinterpret_v_u64m8_i64m8(vuint64m8_t src) { + return vreinterpret_i64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vreinterpret_v_f64m1_i64m1(vfloat64m1_t src) { + return vreinterpret_i64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vreinterpret_v_f64m2_i64m2(vfloat64m2_t src) { + return vreinterpret_i64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vreinterpret_v_f64m4_i64m4(vfloat64m4_t src) { + return vreinterpret_i64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vreinterpret_v_f64m8_i64m8(vfloat64m8_t src) { + return vreinterpret_i64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vreinterpret_v_f64m1_u64m1(vfloat64m1_t src) { + return vreinterpret_u64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vreinterpret_v_f64m2_u64m2(vfloat64m2_t src) { + return vreinterpret_u64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vreinterpret_v_f64m4_u64m4(vfloat64m4_t src) { + return vreinterpret_u64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_u64m8( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vreinterpret_v_f64m8_u64m8(vfloat64m8_t src) { + return vreinterpret_u64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vreinterpret_v_i64m1_f64m1(vint64m1_t src) { + return vreinterpret_f64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vreinterpret_v_i64m2_f64m2(vint64m2_t src) { + return vreinterpret_f64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vreinterpret_v_i64m4_f64m4(vint64m4_t src) { + return vreinterpret_f64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vreinterpret_v_i64m8_f64m8(vint64m8_t src) { + return vreinterpret_f64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vreinterpret_v_u64m1_f64m1(vuint64m1_t src) { + return vreinterpret_f64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vreinterpret_v_u64m2_f64m2(vuint64m2_t src) { + return vreinterpret_f64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vreinterpret_v_u64m4_f64m4(vuint64m4_t src) { + return vreinterpret_f64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vreinterpret_v_u64m8_f64m8(vuint64m8_t src) { + return vreinterpret_f64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vreinterpret_v_i8mf4_i16mf4(vint8mf4_t src) { + return vreinterpret_i16mf4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vreinterpret_v_i8mf2_i16mf2(vint8mf2_t src) { + return vreinterpret_i16mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vreinterpret_v_i8m1_i16m1(vint8m1_t src) { + return vreinterpret_i16m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t 
test_vreinterpret_v_i8m2_i16m2(vint8m2_t src) { + return vreinterpret_i16m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vreinterpret_v_i8m4_i16m4(vint8m4_t src) { + return vreinterpret_i16m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vreinterpret_v_i8m8_i16m8(vint8m8_t src) { + return vreinterpret_i16m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vreinterpret_v_u8mf4_u16mf4(vuint8mf4_t src) { + return vreinterpret_u16mf4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vreinterpret_v_u8mf2_u16mf2(vuint8mf2_t src) { + return vreinterpret_u16mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vreinterpret_v_u8m1_u16m1(vuint8m1_t src) { + return vreinterpret_u16m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vreinterpret_v_u8m2_u16m2(vuint8m2_t src) { + return vreinterpret_u16m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vreinterpret_v_u8m4_u16m4(vuint8m4_t src) { + return vreinterpret_u16m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vreinterpret_v_u8m8_u16m8(vuint8m8_t src) { + return vreinterpret_u16m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vreinterpret_v_i8mf2_i32mf2(vint8mf2_t src) { + return vreinterpret_i32mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vreinterpret_v_i8m1_i32m1(vint8m1_t src) { + return vreinterpret_i32m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vreinterpret_v_i8m2_i32m2(vint8m2_t src) { + return vreinterpret_i32m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vreinterpret_v_i8m4_i32m4(vint8m4_t src) { + return vreinterpret_i32m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i32m8( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vreinterpret_v_i8m8_i32m8(vint8m8_t src) { + return vreinterpret_i32m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vreinterpret_v_u8mf2_u32mf2(vuint8mf2_t src) { + return vreinterpret_u32mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vreinterpret_v_u8m1_u32m1(vuint8m1_t src) { + return vreinterpret_u32m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vreinterpret_v_u8m2_u32m2(vuint8m2_t src) { + return vreinterpret_u32m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vreinterpret_v_u8m4_u32m4(vuint8m4_t src) { + return vreinterpret_u32m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vreinterpret_v_u8m8_u32m8(vuint8m8_t src) { + return vreinterpret_u32m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vreinterpret_v_i8m1_i64m1(vint8m1_t src) { + return vreinterpret_i64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vreinterpret_v_i8m2_i64m2(vint8m2_t src) { + return vreinterpret_i64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vreinterpret_v_i8m4_i64m4(vint8m4_t src) { + return vreinterpret_i64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vreinterpret_v_i8m8_i64m8(vint8m8_t src) { + return vreinterpret_i64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vreinterpret_v_u8m1_u64m1(vuint8m1_t src) { + return vreinterpret_u64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vreinterpret_v_u8m2_u64m2(vuint8m2_t src) { + return vreinterpret_u64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vreinterpret_v_u8m4_u64m4(vuint8m4_t src) { + return 
vreinterpret_u64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vreinterpret_v_u8m8_u64m8(vuint8m8_t src) { + return vreinterpret_u64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vreinterpret_v_i16mf4_i8mf4(vint16mf4_t src) { + return vreinterpret_i8mf4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vreinterpret_v_i16mf2_i8mf2(vint16mf2_t src) { + return vreinterpret_i8mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vreinterpret_v_i16m1_i8m1(vint16m1_t src) { + return vreinterpret_i8m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vreinterpret_v_i16m2_i8m2(vint16m2_t src) { + return vreinterpret_i8m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vreinterpret_v_i16m4_i8m4(vint16m4_t src) { + return vreinterpret_i8m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vreinterpret_v_i16m8_i8m8(vint16m8_t src) { + return vreinterpret_i8m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vreinterpret_v_u16mf4_u8mf4(vuint16mf4_t src) { + return vreinterpret_u8mf4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vreinterpret_v_u16mf2_u8mf2(vuint16mf2_t src) { + return vreinterpret_u8mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vreinterpret_v_u16m1_u8m1(vuint16m1_t src) { + return vreinterpret_u8m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vreinterpret_v_u16m2_u8m2(vuint16m2_t src) { + return vreinterpret_u8m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vreinterpret_v_u16m4_u8m4(vuint16m4_t src) { + return vreinterpret_u8m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vreinterpret_v_u16m8_u8m8(vuint16m8_t src) { + return vreinterpret_u8m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vreinterpret_v_i16mf2_i32mf2(vint16mf2_t src) { + return vreinterpret_i32mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vreinterpret_v_i16m1_i32m1(vint16m1_t src) { + return vreinterpret_i32m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vreinterpret_v_i16m2_i32m2(vint16m2_t src) { + return vreinterpret_i32m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vreinterpret_v_i16m4_i32m4(vint16m4_t src) { + return vreinterpret_i32m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vreinterpret_v_i16m8_i32m8(vint16m8_t src) { + return vreinterpret_i32m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vreinterpret_v_u16mf2_u32mf2(vuint16mf2_t src) { + return vreinterpret_u32mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vreinterpret_v_u16m1_u32m1(vuint16m1_t src) { + return vreinterpret_u32m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vreinterpret_v_u16m2_u32m2(vuint16m2_t src) { + return vreinterpret_u32m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vreinterpret_v_u16m4_u32m4(vuint16m4_t src) { + return vreinterpret_u32m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vreinterpret_v_u16m8_u32m8(vuint16m8_t src) { + return vreinterpret_u32m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vreinterpret_v_i16m1_i64m1(vint16m1_t src) { + return vreinterpret_i64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vreinterpret_v_i16m2_i64m2(vint16m2_t src) { + return vreinterpret_i64m2(src); +} + +// 
CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vreinterpret_v_i16m4_i64m4(vint16m4_t src) { + return vreinterpret_i64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vreinterpret_v_i16m8_i64m8(vint16m8_t src) { + return vreinterpret_i64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vreinterpret_v_u16m1_u64m1(vuint16m1_t src) { + return vreinterpret_u64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vreinterpret_v_u16m2_u64m2(vuint16m2_t src) { + return vreinterpret_u64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vreinterpret_v_u16m4_u64m4(vuint16m4_t src) { + return vreinterpret_u64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vreinterpret_v_u16m8_u64m8(vuint16m8_t src) { + return vreinterpret_u64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vreinterpret_v_i32mf2_i8mf2(vint32mf2_t src) { + return vreinterpret_i8mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vreinterpret_v_i32m1_i8m1(vint32m1_t src) { + return vreinterpret_i8m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vreinterpret_v_i32m2_i8m2(vint32m2_t src) { + return vreinterpret_i8m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vreinterpret_v_i32m4_i8m4(vint32m4_t src) { + return vreinterpret_i8m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vreinterpret_v_i32m8_i8m8(vint32m8_t src) { + return vreinterpret_i8m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vreinterpret_v_u32mf2_u8mf2(vuint32mf2_t src) { + return vreinterpret_u8mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint8m1_t test_vreinterpret_v_u32m1_u8m1(vuint32m1_t src) { + return vreinterpret_u8m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vreinterpret_v_u32m2_u8m2(vuint32m2_t src) { + return vreinterpret_u8m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vreinterpret_v_u32m4_u8m4(vuint32m4_t src) { + return vreinterpret_u8m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vreinterpret_v_u32m8_u8m8(vuint32m8_t src) { + return vreinterpret_u8m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vreinterpret_v_i32mf2_i16mf2(vint32mf2_t src) { + return vreinterpret_i16mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vreinterpret_v_i32m1_i16m1(vint32m1_t src) { + return vreinterpret_i16m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vreinterpret_v_i32m2_i16m2(vint32m2_t src) { + return vreinterpret_i16m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vreinterpret_v_i32m4_i16m4(vint32m4_t src) { + return vreinterpret_i16m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vreinterpret_v_i32m8_i16m8(vint32m8_t src) { + return vreinterpret_i16m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vreinterpret_v_u32mf2_u16mf2(vuint32mf2_t src) { + return vreinterpret_u16mf2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vreinterpret_v_u32m1_u16m1(vuint32m1_t src) { + return vreinterpret_u16m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vreinterpret_v_u32m2_u16m2(vuint32m2_t src) { + return vreinterpret_u16m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vreinterpret_v_u32m4_u16m4(vuint32m4_t src) { + return vreinterpret_u16m4(src); +} + +// CHECK-RV64-LABEL: 
@test_vreinterpret_v_u32m8_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vreinterpret_v_u32m8_u16m8(vuint32m8_t src) { + return vreinterpret_u16m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vreinterpret_v_i32m1_i64m1(vint32m1_t src) { + return vreinterpret_i64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vreinterpret_v_i32m2_i64m2(vint32m2_t src) { + return vreinterpret_i64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vreinterpret_v_i32m4_i64m4(vint32m4_t src) { + return vreinterpret_i64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vreinterpret_v_i32m8_i64m8(vint32m8_t src) { + return vreinterpret_i64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vreinterpret_v_u32m1_u64m1(vuint32m1_t src) { + return vreinterpret_u64m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vreinterpret_v_u32m2_u64m2(vuint32m2_t src) { + return vreinterpret_u64m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vreinterpret_v_u32m4_u64m4(vuint32m4_t src) { + return vreinterpret_u64m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vreinterpret_v_u32m8_u64m8(vuint32m8_t src) { + return vreinterpret_u64m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vreinterpret_v_i64m1_i8m1(vint64m1_t src) { + return vreinterpret_i8m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vreinterpret_v_i64m2_i8m2(vint64m2_t src) { + return vreinterpret_i8m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vreinterpret_v_i64m4_i8m4(vint64m4_t src) { + return vreinterpret_i8m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t 
test_vreinterpret_v_i64m8_i8m8(vint64m8_t src) { + return vreinterpret_i8m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vreinterpret_v_u64m1_u8m1(vuint64m1_t src) { + return vreinterpret_u8m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vreinterpret_v_u64m2_u8m2(vuint64m2_t src) { + return vreinterpret_u8m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vreinterpret_v_u64m4_u8m4(vuint64m4_t src) { + return vreinterpret_u8m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vreinterpret_v_u64m8_u8m8(vuint64m8_t src) { + return vreinterpret_u8m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vreinterpret_v_i64m1_i16m1(vint64m1_t src) { + return vreinterpret_i16m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vreinterpret_v_i64m2_i16m2(vint64m2_t src) { + return vreinterpret_i16m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vreinterpret_v_i64m4_i16m4(vint64m4_t src) { + return vreinterpret_i16m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vreinterpret_v_i64m8_i16m8(vint64m8_t src) { + return vreinterpret_i16m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vreinterpret_v_u64m1_u16m1(vuint64m1_t src) { + return vreinterpret_u16m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vreinterpret_v_u64m2_u16m2(vuint64m2_t src) { + return vreinterpret_u16m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vreinterpret_v_u64m4_u16m4(vuint64m4_t src) { + return vreinterpret_u16m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vreinterpret_v_u64m8_u16m8(vuint64m8_t src) { + return vreinterpret_u16m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i32m1( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vreinterpret_v_i64m1_i32m1(vint64m1_t src) { + return vreinterpret_i32m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vreinterpret_v_i64m2_i32m2(vint64m2_t src) { + return vreinterpret_i32m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vreinterpret_v_i64m4_i32m4(vint64m4_t src) { + return vreinterpret_i32m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vreinterpret_v_i64m8_i32m8(vint64m8_t src) { + return vreinterpret_i32m8(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vreinterpret_v_u64m1_u32m1(vuint64m1_t src) { + return vreinterpret_u32m1(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vreinterpret_v_u64m2_u32m2(vuint64m2_t src) { + return vreinterpret_u32m2(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) { + return vreinterpret_u32m4(src); +} + +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vreinterpret_v_u64m8_u32m8(vuint64m8_t src) { + return vreinterpret_u32m8(src); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c @@ -0,0 +1,546 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4( +// CHECK-RV64-NEXT: entry: +//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) { + return vset(dest, 0, val); +} + +// 
CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) { + return vset(dest, 0, val); +} + +// CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( [[DEST:%.*]], [[VAL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) { + return vset(dest, 0, val); +} diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -170,9 +170,10 @@ public: RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName, - StringRef IRName, bool HasSideEffects, bool IsMask, - bool HasMaskedOffOperand, bool HasVL, bool HasNoMaskedOverloaded, - bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types, + StringRef 
MangledSuffix, StringRef IRName, bool HasSideEffects, + bool IsMask, bool HasMaskedOffOperand, bool HasVL, + bool HasNoMaskedOverloaded, bool HasAutoDef, + StringRef ManualCodegen, const RVVTypes &Types, const std::vector<int64_t> &IntrinsicTypes, StringRef RequiredExtension, unsigned NF); ~RVVIntrinsic() = default; @@ -751,8 +752,8 @@ // RVVIntrinsic implementation //===----------------------------------------------------------------------===// RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix, - StringRef NewMangledName, StringRef IRName, - bool HasSideEffects, bool IsMask, + StringRef NewMangledName, StringRef MangledSuffix, + StringRef IRName, bool HasSideEffects, bool IsMask, bool HasMaskedOffOperand, bool HasVL, bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &OutInTypes, @@ -771,6 +772,8 @@ MangledName = NewMangledName.str(); if (!Suffix.empty()) Name += "_" + Suffix.str(); + if (!MangledSuffix.empty()) + MangledName += "_" + MangledSuffix.str(); if (IsMask) { Name += "_m"; } @@ -1073,6 +1076,7 @@ StringRef Name = R->getValueAsString("Name"); StringRef SuffixProto = R->getValueAsString("Suffix"); StringRef MangledName = R->getValueAsString("MangledName"); + StringRef MangledSuffixProto = R->getValueAsString("MangledSuffix"); StringRef Prototypes = R->getValueAsString("Prototype"); StringRef TypeRange = R->getValueAsString("TypeRange"); bool HasMask = R->getValueAsBit("HasMask"); @@ -1147,19 +1151,20 @@ continue; auto SuffixStr = getSuffixStr(I, Log2LMUL, SuffixProto); + auto MangledSuffixStr = getSuffixStr(I, Log2LMUL, MangledSuffixProto); // Create a non-mask intrinsic Out.push_back(std::make_unique<RVVIntrinsic>( - Name, SuffixStr, MangledName, IRName, HasSideEffects, - /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, - HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(), - IntrinsicTypes, RequiredExtension, NF)); + Name, SuffixStr, MangledName, MangledSuffixStr, IRName, + HasSideEffects, /*IsMask=*/false, /*HasMaskedOffOperand=*/false, + HasVL, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, + Types.getValue(), IntrinsicTypes, RequiredExtension, NF)); if (HasMask) { // Create a mask intrinsic Optional<RVVTypes> MaskTypes = computeTypes(I, Log2LMUL, NF, ProtoMaskSeq); Out.push_back(std::make_unique<RVVIntrinsic>( - Name, SuffixStr, MangledName, IRNameMask, HasSideEffects, - /*IsMask=*/true, HasMaskedOffOperand, HasVL, + Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask, + HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask, MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF)); }
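
To make the behavior the new tests pin down easier to see at a glance, here is a minimal usage sketch (not part of the patch). It assumes the same riscv64 + experimental-v configuration as the RUN lines above; the function names bits_of and with_part1 are illustrative only.

#include <riscv_vector.h>

/* Overloaded vreinterpret: the suffix names only the destination type,
   so the source type is deduced from the argument and one spelling covers
   every legal source under the same SEW and LMUL. */
vuint32m1_t bits_of(vfloat32m1_t f) {
  return vreinterpret_u32m1(f); /* a single IR bitcast, per the CHECK lines */
}

/* Fully overloaded vset: replace one LMUL=1 part of an LMUL=4 register
   group. The part index must be a constant expression; 0..3 are valid
   when inserting an m1 value into an m4 group (the tests above use 0). */
vint32m4_t with_part1(vint32m4_t group, vint32m1_t part) {
  return vset(group, 1, part);
}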
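
The overloaded-name composition itself is small enough to show in isolation. The following standalone sketch (a hypothetical helper, not the emitter code) mirrors the rule added to the RVVIntrinsic constructor: when the instantiated MangledSuffix is non-empty, append an underscore and the suffix to the overloaded name; otherwise leave the name bare. This is why vreinterpret gets per-destination-type spellings such as vreinterpret_u32m1 while vset stays a single overloaded vset.

#include <stdio.h>

/* Hypothetical illustration of: MangledName += "_" + MangledSuffix.str(),
   guarded by the !MangledSuffix.empty() check in the constructor above. */
static void compose_overloaded_name(char *out, size_t outlen,
                                    const char *mangled,
                                    const char *mangled_suffix) {
  if (mangled_suffix[0] != '\0')
    snprintf(out, outlen, "%s_%s", mangled, mangled_suffix);
  else
    snprintf(out, outlen, "%s", mangled);
}

int main(void) {
  char name[64];
  compose_overloaded_name(name, sizeof name, "vreinterpret", "u32m1");
  printf("%s\n", name); /* vreinterpret_u32m1 */
  compose_overloaded_name(name, sizeof name, "vset", "");
  printf("%s\n", name); /* vset */
  return 0;
}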