diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -92,6 +92,17 @@ // (SEW=16, LMUL=4) and Log2EEW is 3 (EEW=8), and then equivalent vector // type is __rvv_uint8m2_t (elmul=(8/16)*4 = 2). Ignore to define a new // builtins if its equivalent type has illegal lmul. +// (FixedSEW:Value): Given a vector type (SEW and LMUL), computes another +// vector type which only changes SEW to the given value. Ignore to define a new +// builtin if its equivalent type has illegal lmul or the SEW is not changed. +// (SFixedLog2LMUL:Value): Smaller Fixed Log2LMUL. Given a vector type (SEW +// and LMUL), computes another vector type which only changes LMUL to the +// given value. The new LMUL should be smaller than the old one. Ignore to +// define a new builtin if its equivalent type has illegal lmul. +// (LFixedLog2LMUL:Value): Larger Fixed Log2LMUL. Given a vector type (SEW +// and LMUL), computes another vector type which only changes LMUL to the +// given value. The new LMUL should be larger than the old one. Ignore to +// define a new builtin if its equivalent type has illegal lmul. 
// // Following with the example above, if t is "i", then "Ue" will yield unsigned // int and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), Fw would @@ -1272,3 +1283,69 @@ defm vcompress : RVVOutBuiltinSet<"vcompress", "csil", [["vm", "Uv", "UvUvUvm"]]>; } + +// Miscellaneous +let HasMask = false, HasVL = false, HasNoMaskedOverloaded = false, + IRName = "" in { + let Name = "vreinterpret_v", + ManualCodegen = [{ + return Builder.CreateBitCast(Ops[0], ResultType); + }] in { + // Reinterpret between different type under the same SEW and LMUL + def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil">; + def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il">; + def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil">; + def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il">; + def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il">; + def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il">; + + // Reinterpret between different SEW under the same LMUL + foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)", "(FixedSEW:32)", + "(FixedSEW:64)"] in { + def vreinterpret_i_ # dst_sew : RVVBuiltin<"v" # dst_sew # "v", dst_sew # "vv", "csil">; + def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv", dst_sew # "UvUv", "csil">; + } + } + + let Name = "vundefined", + ManualCodegen = [{ + return llvm::UndefValue::get(ResultType); + }] in { + def vundefined : RVVBuiltin<"v", "v", "csilfd">; + def vundefined_u : RVVBuiltin<"Uv", "Uv", "csil">; + } + + // LMUL truncation + // C/C++ Operand: VecTy, IR Operand: VecTy, Index + let Name = "vlmul_trunc_v", + ManualCodegen = [{ { + ID = Intrinsic::experimental_vector_extract; + IntrinsicTypes = {ResultType, Ops[0]->getType()}; + Ops.push_back(ConstantInt::get(Int64Ty, 0)); + return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, ""); + } }] in { + foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)", + "(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in { + 
def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilfd">; + def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUv", "csil">; + } + } + + // LMUL extension + // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index + let Name = "vlmul_ext_v", + ManualCodegen = [{ + ID = Intrinsic::experimental_vector_insert; + IntrinsicTypes = {ResultType, Ops[0]->getType()}; + Ops.push_back(llvm::UndefValue::get(ResultType)); + std::swap(Ops[0], Ops[1]); + Ops.push_back(ConstantInt::get(Int64Ty, 0)); + return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, ""); + }] in { + foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)", + "(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in { + def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilfd">; + def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUv", "csil">; + } + } +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlmul.c @@ -0,0 +1,3368 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: 
ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) { + return vlmul_ext_v_i8mf8_i8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) { + return vlmul_ext_v_i8mf8_i8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) { + return vlmul_ext_v_i8mf8_i8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t 
test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) { + return vlmul_ext_v_i8mf8_i8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) { + return vlmul_ext_v_i8mf8_i8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf8_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) { + return vlmul_ext_v_i8mf8_i8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) { + return vlmul_ext_v_i8mf4_i8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) 
+// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) { + return vlmul_ext_v_i8mf4_i8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf4_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) { + return vlmul_ext_v_i8mf4_i8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf4_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) { + return vlmul_ext_v_i8mf4_i8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf4_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t 
test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) { + return vlmul_ext_v_i8mf4_i8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) { + return vlmul_ext_v_i8mf2_i8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf2_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) { + return vlmul_ext_v_i8mf2_i8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf2_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) { + return vlmul_ext_v_i8mf2_i8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8mf2_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) { + return vlmul_ext_v_i8mf2_i8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m1_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) { + return vlmul_ext_v_i8m1_i8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m1_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) { + return vlmul_ext_v_i8m1_i8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m1_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t 
test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) { + return vlmul_ext_v_i8m1_i8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m2_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) { + return vlmul_ext_v_i8m2_i8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m2_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) { + return vlmul_ext_v_i8m2_i8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i8m4_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) { + return vlmul_ext_v_i8m4_i8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) { + return vlmul_ext_v_i16mf4_i16mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) { + return vlmul_ext_v_i16mf4_i16m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf4_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) { + return vlmul_ext_v_i16mf4_i16m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf4_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) { + return vlmul_ext_v_i16mf4_i16m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf4_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) { + return vlmul_ext_v_i16mf4_i16m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) { + return vlmul_ext_v_i16mf2_i16m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf2_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) { + return vlmul_ext_v_i16mf2_i16m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf2_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) { + return vlmul_ext_v_i16mf2_i16m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16mf2_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) { + return vlmul_ext_v_i16mf2_i16m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m1_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) { + return vlmul_ext_v_i16m1_i16m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m1_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) { + return vlmul_ext_v_i16m1_i16m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m1_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) { + return vlmul_ext_v_i16m1_i16m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m2_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) { + return vlmul_ext_v_i16m2_i16m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i16m2_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) { + return vlmul_ext_v_i16m2_i16m8(op1); +} + +// CHECK-RV32-LABEL: 
@test_vlmul_ext_v_i16m4_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) { + return vlmul_ext_v_i16m4_i16m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32mf2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) { + return vlmul_ext_v_i32mf2_i32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32mf2_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) { + return vlmul_ext_v_i32mf2_i32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32mf2_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vlmul_ext_v_i32mf2_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) { + return vlmul_ext_v_i32mf2_i32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32mf2_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) { + return vlmul_ext_v_i32mf2_i32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m1_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) { + return vlmul_ext_v_i32m1_i32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m1_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t 
test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) { + return vlmul_ext_v_i32m1_i32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m1_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) { + return vlmul_ext_v_i32m1_i32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m2_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) { + return vlmul_ext_v_i32m2_i32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m2_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) { + return vlmul_ext_v_i32m2_i32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i32m4_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( 
undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) { + return vlmul_ext_v_i32m4_i32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m1_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) { + return vlmul_ext_v_i64m1_i64m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m1_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) { + return vlmul_ext_v_i64m1_i64m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m1_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) { + return vlmul_ext_v_i64m1_i64m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m2_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) { + return vlmul_ext_v_i64m2_i64m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m2_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) { + return vlmul_ext_v_i64m2_i64m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_i64m4_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) { + return vlmul_ext_v_i64m4_i64m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) { + return vlmul_ext_v_u8mf8_u8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) { + return vlmul_ext_v_u8mf8_u8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) { + return vlmul_ext_v_u8mf8_u8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv16i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) { + return vlmul_ext_v_u8mf8_u8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) { + return vlmul_ext_v_u8mf8_u8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf8_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv1i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) { + return vlmul_ext_v_u8mf8_u8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) { + return vlmul_ext_v_u8mf4_u8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf4_u8m1( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) { + return vlmul_ext_v_u8mf4_u8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf4_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) { + return vlmul_ext_v_u8mf4_u8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf4_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) { + return vlmul_ext_v_u8mf4_u8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf4_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv2i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) { + return vlmul_ext_v_u8mf4_u8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) { + return vlmul_ext_v_u8mf2_u8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf2_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) { + return vlmul_ext_v_u8mf2_u8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8mf2_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) { + return vlmul_ext_v_u8mf2_u8m4(op1); +} + +// CHECK-RV32-LABEL: 
@test_vlmul_ext_v_u8mf2_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv4i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) { + return vlmul_ext_v_u8mf2_u8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m1_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) { + return vlmul_ext_v_u8m1_u8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m1_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) { + return vlmul_ext_v_u8m1_u8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m1_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv8i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) { + return vlmul_ext_v_u8m1_u8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m2_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) { + return vlmul_ext_v_u8m2_u8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m2_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv16i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) { + return vlmul_ext_v_u8m2_u8m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u8m4_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv64i8.nxv32i8( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) { + return vlmul_ext_v_u8m4_u8m8(op1); +} + +// CHECK-RV32-LABEL: 
@test_vlmul_ext_v_u16mf4_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) { + return vlmul_ext_v_u16mf4_u16mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) { + return vlmul_ext_v_u16mf4_u16m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf4_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) { + return vlmul_ext_v_u16mf4_u16m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf4_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vlmul_ext_v_u16mf4_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) { + return vlmul_ext_v_u16mf4_u16m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf4_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv1i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) { + return vlmul_ext_v_u16mf4_u16m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) { + return vlmul_ext_v_u16mf2_u16m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf2_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t 
test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) { + return vlmul_ext_v_u16mf2_u16m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf2_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) { + return vlmul_ext_v_u16mf2_u16m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16mf2_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv2i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) { + return vlmul_ext_v_u16mf2_u16m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m1_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) { + return vlmul_ext_v_u16m1_u16m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m1_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) { + return vlmul_ext_v_u16m1_u16m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m1_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv4i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) { + return vlmul_ext_v_u16m1_u16m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m2_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) { + return vlmul_ext_v_u16m2_u16m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m2_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv32i16.nxv8i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) { + return vlmul_ext_v_u16m2_u16m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u16m4_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv32i16.nxv16i16( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) { + return vlmul_ext_v_u16m4_u16m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32mf2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) { + return vlmul_ext_v_u32mf2_u32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32mf2_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) { + return vlmul_ext_v_u32mf2_u32m2(op1); +} + +// CHECK-RV32-LABEL: 
@test_vlmul_ext_v_u32mf2_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) { + return vlmul_ext_v_u32mf2_u32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32mf2_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv1i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) { + return vlmul_ext_v_u32mf2_u32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m1_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) { + return vlmul_ext_v_u32m1_u32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m1_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vlmul_ext_v_u32m1_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) { + return vlmul_ext_v_u32m1_u32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m1_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv2i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) { + return vlmul_ext_v_u32m1_u32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m2_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) { + return vlmul_ext_v_u32m2_u32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m2_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv4i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t 
test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) { + return vlmul_ext_v_u32m2_u32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u32m4_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16i32.nxv8i32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) { + return vlmul_ext_v_u32m4_u32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m1_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) { + return vlmul_ext_v_u64m1_u64m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m1_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) { + return vlmul_ext_v_u64m1_u64m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m1_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv1i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) { + return vlmul_ext_v_u64m1_u64m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m2_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) { + return vlmul_ext_v_u64m2_u64m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m2_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv2i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) { + return vlmul_ext_v_u64m2_u64m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_u64m4_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv8i64.nxv4i64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) { + return vlmul_ext_v_u64m4_u64m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32mf2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) { + return vlmul_ext_v_f32mf2_f32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32mf2_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) { + return vlmul_ext_v_f32mf2_f32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32mf2_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) { + return vlmul_ext_v_f32mf2_f32m4(op1); +} + +// CHECK-RV32-LABEL: 
@test_vlmul_ext_v_f32mf2_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv1f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) { + return vlmul_ext_v_f32mf2_f32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m1_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) { + return vlmul_ext_v_f32m1_f32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m1_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) { + return vlmul_ext_v_f32m1_f32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m1_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vlmul_ext_v_f32m1_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv2f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) { + return vlmul_ext_v_f32m1_f32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m2_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f32.nxv4f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) { + return vlmul_ext_v_f32m2_f32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m2_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv4f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) { + return vlmul_ext_v_f32m2_f32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f32m4_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv16f32.nxv8f32( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) { + return vlmul_ext_v_f32m4_f32m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m1_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv2f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) { + return vlmul_ext_v_f64m1_f64m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m1_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) { + return vlmul_ext_v_f64m1_f64m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m1_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv1f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) { + return vlmul_ext_v_f64m1_f64m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m2_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv4f64.nxv2f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) { + return vlmul_ext_v_f64m2_f64m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m2_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv2f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) { + return vlmul_ext_v_f64m2_f64m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_ext_v_f64m4_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.insert.nxv8f64.nxv4f64( undef, [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) { + return vlmul_ext_v_f64m4_f64m8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { + return vlmul_trunc_v_i8mf4_i8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { + return vlmul_trunc_v_i8mf2_i8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { + return vlmul_trunc_v_i8mf2_i8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { + return vlmul_trunc_v_i8m1_i8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( +// CHECK-RV32-NEXT: entry: 
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { + return vlmul_trunc_v_i8m1_i8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { + return vlmul_trunc_v_i8m1_i8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { + return vlmul_trunc_v_i8m2_i8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { + return vlmul_trunc_v_i8m2_i8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { + return vlmul_trunc_v_i8m2_i8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { + return vlmul_trunc_v_i8m2_i8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { + return vlmul_trunc_v_i8m4_i8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { + return vlmul_trunc_v_i8m4_i8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { + return vlmul_trunc_v_i8m4_i8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { + return vlmul_trunc_v_i8m4_i8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m4_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { + return vlmul_trunc_v_i8m4_i8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { + return vlmul_trunc_v_i8m8_i8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { + return vlmul_trunc_v_i8m8_i8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { + return vlmul_trunc_v_i8m8_i8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { + return vlmul_trunc_v_i8m8_i8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { + return vlmul_trunc_v_i8m8_i8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { + return vlmul_trunc_v_i8m8_i8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { + return vlmul_trunc_v_i16mf2_i16mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { + return vlmul_trunc_v_i16m1_i16mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { + return vlmul_trunc_v_i16m1_i16mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { + return vlmul_trunc_v_i16m2_i16mf4(op1); +} + +// CHECK-RV32-LABEL: 
@test_vlmul_trunc_v_i16m2_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { + return vlmul_trunc_v_i16m2_i16mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { + return vlmul_trunc_v_i16m2_i16m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { + return vlmul_trunc_v_i16m4_i16mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { + return vlmul_trunc_v_i16m4_i16mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { + return vlmul_trunc_v_i16m4_i16m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m4_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { + return vlmul_trunc_v_i16m4_i16m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { + return 
vlmul_trunc_v_i16m8_i16mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { + return vlmul_trunc_v_i16m8_i16mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { + return vlmul_trunc_v_i16m8_i16m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m8_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { + return vlmul_trunc_v_i16m8_i16m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] 
+// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { + return vlmul_trunc_v_i16m8_i16m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { + return vlmul_trunc_v_i32m1_i32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { + return vlmul_trunc_v_i32m2_i32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t 
test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { + return vlmul_trunc_v_i32m2_i32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { + return vlmul_trunc_v_i32m4_i32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { + return vlmul_trunc_v_i32m4_i32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { + return vlmul_trunc_v_i32m4_i32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( 
[[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { + return vlmul_trunc_v_i32m8_i32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { + return vlmul_trunc_v_i32m8_i32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { + return vlmul_trunc_v_i32m8_i32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i32m8_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { + return vlmul_trunc_v_i32m8_i32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { + return vlmul_trunc_v_i64m2_i64m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { + return vlmul_trunc_v_i64m4_i64m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { + return vlmul_trunc_v_i64m4_i64m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { + return vlmul_trunc_v_i64m8_i64m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { + return vlmul_trunc_v_i64m8_i64m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { + return vlmul_trunc_v_i64m8_i64m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv2i8( 
[[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { + return vlmul_trunc_v_u8mf4_u8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { + return vlmul_trunc_v_u8mf2_u8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { + return vlmul_trunc_v_u8mf2_u8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { + return vlmul_trunc_v_u8m1_u8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { + return vlmul_trunc_v_u8m1_u8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { + return vlmul_trunc_v_u8m1_u8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { + return vlmul_trunc_v_u8m2_u8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 
0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { + return vlmul_trunc_v_u8m2_u8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { + return vlmul_trunc_v_u8m2_u8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { + return vlmul_trunc_v_u8m2_u8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { + return vlmul_trunc_v_u8m4_u8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { + return vlmul_trunc_v_u8m4_u8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { + return vlmul_trunc_v_u8m4_u8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { + return vlmul_trunc_v_u8m4_u8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { + return vlmul_trunc_v_u8m4_u8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { + return vlmul_trunc_v_u8m8_u8mf8(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { + return vlmul_trunc_v_u8m8_u8mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { + return vlmul_trunc_v_u8m8_u8mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { + return vlmul_trunc_v_u8m8_u8m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { + return vlmul_trunc_v_u8m8_u8m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { + return vlmul_trunc_v_u8m8_u8m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 
0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { + return vlmul_trunc_v_u16mf2_u16mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { + return vlmul_trunc_v_u16m1_u16mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { + return vlmul_trunc_v_u16m1_u16mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { + return vlmul_trunc_v_u16m2_u16mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { + return vlmul_trunc_v_u16m2_u16mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { + return vlmul_trunc_v_u16m2_u16m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { + return vlmul_trunc_v_u16m4_u16mf4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { + return vlmul_trunc_v_u16m4_u16mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { + return vlmul_trunc_v_u16m4_u16m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { + return vlmul_trunc_v_u16m4_u16m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { + return vlmul_trunc_v_u16m8_u16mf4(op1); +} + +// CHECK-RV32-LABEL: 
@test_vlmul_trunc_v_u16m8_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { + return vlmul_trunc_v_u16m8_u16mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { + return vlmul_trunc_v_u16m8_u16m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { + return vlmul_trunc_v_u16m8_u16m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u16m8_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_u16m8_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { + return vlmul_trunc_v_u16m8_u16m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { + return vlmul_trunc_v_u32m1_u32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { + return vlmul_trunc_v_u32m2_u32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t 
test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { + return vlmul_trunc_v_u32m2_u32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { + return vlmul_trunc_v_u32m4_u32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { + return vlmul_trunc_v_u32m4_u32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { + return vlmul_trunc_v_u32m4_u32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { + return vlmul_trunc_v_u32m8_u32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { + return vlmul_trunc_v_u32m8_u32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { + return vlmul_trunc_v_u32m8_u32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
<vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]] +// +vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { + return vlmul_trunc_v_u32m8_u32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] +// +vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { + return vlmul_trunc_v_u64m2_u64m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]] +// +vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { + return vlmul_trunc_v_u64m4_u64m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]] +// +vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { + return vlmul_trunc_v_u64m4_u64m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { + return vlmul_trunc_v_u64m8_u64m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { + return vlmul_trunc_v_u64m8_u64m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { + return vlmul_trunc_v_u64m8_u64m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { + return vlmul_trunc_v_f32m1_f32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { + return vlmul_trunc_v_f32m2_f32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { + return vlmul_trunc_v_f32m2_f32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { + return vlmul_trunc_v_f32m4_f32mf2(op1); +} 
+ +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { + return vlmul_trunc_v_f32m4_f32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { + return vlmul_trunc_v_f32m4_f32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { + return vlmul_trunc_v_f32m8_f32mf2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { + return vlmul_trunc_v_f32m8_f32m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { + return vlmul_trunc_v_f32m8_f32m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { + return vlmul_trunc_v_f32m8_f32m4(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t 
test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { + return vlmul_trunc_v_f64m2_f64m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { + return vlmul_trunc_v_f64m4_f64m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { + return vlmul_trunc_v_f64m4_f64m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { + return vlmul_trunc_v_f64m8_f64m1(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { + return vlmul_trunc_v_f64m8_f64m2(op1); +} + +// CHECK-RV32-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.experimental.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { + return vlmul_trunc_v_f64m8_f64m4(op1); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vreinterpret.c @@ -0,0 +1,2608 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf8_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: 
@test_vreinterpret_v_i8mf8_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint8mf8_t test_vreinterpret_v_i8mf8_u8mf8(vint8mf8_t src) { + return vreinterpret_v_i8mf8_u8mf8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf4_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint8mf4_t test_vreinterpret_v_i8mf4_u8mf4(vint8mf4_t src) { + return vreinterpret_v_i8mf4_u8mf4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf2_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint8mf2_t test_vreinterpret_v_i8mf2_u8mf2(vint8mf2_t src) { + return vreinterpret_v_i8mf2_u8mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint8m1_t test_vreinterpret_v_i8m1_u8m1(vint8m1_t src) { + return vreinterpret_v_i8m1_u8m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m2_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint8m2_t test_vreinterpret_v_i8m2_u8m2(vint8m2_t src) { + return vreinterpret_v_i8m2_u8m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m4_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint8m4_t test_vreinterpret_v_i8m4_u8m4(vint8m4_t src) { + return vreinterpret_v_i8m4_u8m4(src); +} + +// 
CHECK-RV32-LABEL: @test_vreinterpret_v_i8m8_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint8m8_t test_vreinterpret_v_i8m8_u8m8(vint8m8_t src) { + return vreinterpret_v_i8m8_u8m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf8_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf8_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint8mf8_t test_vreinterpret_v_u8mf8_i8mf8(vuint8mf8_t src) { + return vreinterpret_v_u8mf8_i8mf8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf4_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint8mf4_t test_vreinterpret_v_u8mf4_i8mf4(vuint8mf4_t src) { + return vreinterpret_v_u8mf4_i8mf4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf2_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint8mf2_t test_vreinterpret_v_u8mf2_i8mf2(vuint8mf2_t src) { + return vreinterpret_v_u8mf2_i8mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint8m1_t test_vreinterpret_v_u8m1_i8m1(vuint8m1_t src) { + return vreinterpret_v_u8m1_i8m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m2_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_i8m2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint8m2_t test_vreinterpret_v_u8m2_i8m2(vuint8m2_t src) { + return vreinterpret_v_u8m2_i8m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m4_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint8m4_t test_vreinterpret_v_u8m4_i8m4(vuint8m4_t src) { + return vreinterpret_v_u8m4_i8m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m8_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint8m8_t test_vreinterpret_v_u8m8_i8m8(vuint8m8_t src) { + return vreinterpret_v_u8m8_i8m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16mf4_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint16mf4_t test_vreinterpret_v_i16mf4_u16mf4(vint16mf4_t src) { + return vreinterpret_v_i16mf4_u16mf4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16mf2_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint16mf2_t test_vreinterpret_v_i16mf2_u16mf2(vint16mf2_t src) { + return vreinterpret_v_i16mf2_u16mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint16m1_t test_vreinterpret_v_i16m1_u16m1(vint16m1_t src) { + return vreinterpret_v_i16m1_u16m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m2_u16m2( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint16m2_t test_vreinterpret_v_i16m2_u16m2(vint16m2_t src) { + return vreinterpret_v_i16m2_u16m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m4_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint16m4_t test_vreinterpret_v_i16m4_u16m4(vint16m4_t src) { + return vreinterpret_v_i16m4_u16m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m8_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint16m8_t test_vreinterpret_v_i16m8_u16m8(vint16m8_t src) { + return vreinterpret_v_i16m8_u16m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16mf4_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint16mf4_t test_vreinterpret_v_u16mf4_i16mf4(vuint16mf4_t src) { + return vreinterpret_v_u16mf4_i16mf4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16mf2_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint16mf2_t test_vreinterpret_v_u16mf2_i16mf2(vuint16mf2_t src) { + return vreinterpret_v_u16mf2_i16mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret 
[[SRC:%.*]] +// +vint16m1_t test_vreinterpret_v_u16m1_i16m1(vuint16m1_t src) { + return vreinterpret_v_u16m1_i16m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m2_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint16m2_t test_vreinterpret_v_u16m2_i16m2(vuint16m2_t src) { + return vreinterpret_v_u16m2_i16m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m4_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint16m4_t test_vreinterpret_v_u16m4_i16m4(vuint16m4_t src) { + return vreinterpret_v_u16m4_i16m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m8_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint16m8_t test_vreinterpret_v_u16m8_i16m8(vuint16m8_t src) { + return vreinterpret_v_u16m8_i16m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32mf2_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint32mf2_t test_vreinterpret_v_i32mf2_u32mf2(vint32mf2_t src) { + return vreinterpret_v_i32mf2_u32mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint32m1_t test_vreinterpret_v_i32m1_u32m1(vint32m1_t src) { + return vreinterpret_v_i32m1_u32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m2_u32m2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint32m2_t test_vreinterpret_v_i32m2_u32m2(vint32m2_t src) { + return vreinterpret_v_i32m2_u32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m4_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint32m4_t test_vreinterpret_v_i32m4_u32m4(vint32m4_t src) { + return vreinterpret_v_i32m4_u32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m8_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint32m8_t test_vreinterpret_v_i32m8_u32m8(vint32m8_t src) { + return vreinterpret_v_i32m8_u32m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32mf2_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint32mf2_t test_vreinterpret_v_u32mf2_i32mf2(vuint32mf2_t src) { + return vreinterpret_v_u32mf2_i32mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint32m1_t test_vreinterpret_v_u32m1_i32m1(vuint32m1_t src) { + return vreinterpret_v_u32m1_i32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m2_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// 
+vint32m2_t test_vreinterpret_v_u32m2_i32m2(vuint32m2_t src) { + return vreinterpret_v_u32m2_i32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m4_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint32m4_t test_vreinterpret_v_u32m4_i32m4(vuint32m4_t src) { + return vreinterpret_v_u32m4_i32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m8_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint32m8_t test_vreinterpret_v_u32m8_i32m8(vuint32m8_t src) { + return vreinterpret_v_u32m8_i32m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f32mf2_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vreinterpret_v_f32mf2_i32mf2(vfloat32mf2_t src) { + return vreinterpret_v_f32mf2_i32mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vreinterpret_v_f32m1_i32m1(vfloat32m1_t src) { + return vreinterpret_v_f32m1_i32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m2_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vreinterpret_v_f32m2_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vreinterpret_v_f32m2_i32m2(vfloat32m2_t src) { + return vreinterpret_v_f32m2_i32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m4_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vreinterpret_v_f32m4_i32m4(vfloat32m4_t src) { + return vreinterpret_v_f32m4_i32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m8_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vreinterpret_v_f32m8_i32m8(vfloat32m8_t src) { + return vreinterpret_v_f32m8_i32m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f32mf2_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vreinterpret_v_f32mf2_u32mf2(vfloat32mf2_t src) { + return vreinterpret_v_f32mf2_u32mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vreinterpret_v_f32m1_u32m1(vfloat32m1_t src) { + return vreinterpret_v_f32m1_u32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m2_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vreinterpret_v_f32m2_u32m2(vfloat32m2_t src) { + return vreinterpret_v_f32m2_u32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m4_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vreinterpret_v_f32m4_u32m4(vfloat32m4_t src) { + return vreinterpret_v_f32m4_u32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f32m8_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vreinterpret_v_f32m8_u32m8(vfloat32m8_t src) { + return vreinterpret_v_f32m8_u32m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32mf2_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vreinterpret_v_i32mf2_f32mf2(vint32mf2_t src) { + return vreinterpret_v_i32mf2_f32mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m1_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vreinterpret_v_i32m1_f32m1(vint32m1_t src) { + return vreinterpret_v_i32m1_f32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m2_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vreinterpret_v_i32m2_f32m2(vint32m2_t src) { + return vreinterpret_v_i32m2_f32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m4_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vreinterpret_v_i32m4_f32m4(vint32m4_t src) { + return vreinterpret_v_i32m4_f32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m8_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vreinterpret_v_i32m8_f32m8(vint32m8_t src) { + return vreinterpret_v_i32m8_f32m8(src); +} + +// 
CHECK-RV32-LABEL: @test_vreinterpret_v_u32mf2_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vreinterpret_v_u32mf2_f32mf2(vuint32mf2_t src) { + return vreinterpret_v_u32mf2_f32mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m1_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vreinterpret_v_u32m1_f32m1(vuint32m1_t src) { + return vreinterpret_v_u32m1_f32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m2_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vreinterpret_v_u32m2_f32m2(vuint32m2_t src) { + return vreinterpret_v_u32m2_f32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m4_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vreinterpret_v_u32m4_f32m4(vuint32m4_t src) { + return vreinterpret_v_u32m4_f32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m8_f32m8( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vreinterpret_v_u32m8_f32m8(vuint32m8_t src) { + return vreinterpret_v_u32m8_f32m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint64m1_t test_vreinterpret_v_i64m1_u64m1(vint64m1_t src) { + return vreinterpret_v_i64m1_u64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m2_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint64m2_t test_vreinterpret_v_i64m2_u64m2(vint64m2_t src) { + return vreinterpret_v_i64m2_u64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m4_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint64m4_t test_vreinterpret_v_i64m4_u64m4(vint64m4_t src) { + return vreinterpret_v_i64m4_u64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m8_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vuint64m8_t test_vreinterpret_v_i64m8_u64m8(vint64m8_t src) { + return vreinterpret_v_i64m8_u64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: 
@test_vreinterpret_v_u64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint64m1_t test_vreinterpret_v_u64m1_i64m1(vuint64m1_t src) { + return vreinterpret_v_u64m1_i64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m2_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint64m2_t test_vreinterpret_v_u64m2_i64m2(vuint64m2_t src) { + return vreinterpret_v_u64m2_i64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m4_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint64m4_t test_vreinterpret_v_u64m4_i64m4(vuint64m4_t src) { + return vreinterpret_v_u64m4_i64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m8_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret [[SRC:%.*]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret [[SRC:%.*]] +// +vint64m8_t test_vreinterpret_v_u64m8_i64m8(vuint64m8_t src) { + return vreinterpret_v_u64m8_i64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vreinterpret_v_f64m1_i64m1(vfloat64m1_t src) { + return vreinterpret_v_f64m1_i64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m2_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vreinterpret_v_f64m2_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vreinterpret_v_f64m2_i64m2(vfloat64m2_t src) { + return vreinterpret_v_f64m2_i64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m4_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vreinterpret_v_f64m4_i64m4(vfloat64m4_t src) { + return vreinterpret_v_f64m4_i64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m8_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vreinterpret_v_f64m8_i64m8(vfloat64m8_t src) { + return vreinterpret_v_f64m8_i64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vreinterpret_v_f64m1_u64m1(vfloat64m1_t src) { + return vreinterpret_v_f64m1_u64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m2_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
[[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vreinterpret_v_f64m2_u64m2(vfloat64m2_t src) { + return vreinterpret_v_f64m2_u64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m4_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vreinterpret_v_f64m4_u64m4(vfloat64m4_t src) { + return vreinterpret_v_f64m4_u64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_f64m8_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vreinterpret_v_f64m8_u64m8(vfloat64m8_t src) { + return vreinterpret_v_f64m8_u64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vreinterpret_v_i64m1_f64m1(vint64m1_t src) { + return vreinterpret_v_i64m1_f64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m2_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t 
test_vreinterpret_v_i64m2_f64m2(vint64m2_t src) { + return vreinterpret_v_i64m2_f64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m4_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vreinterpret_v_i64m4_f64m4(vint64m4_t src) { + return vreinterpret_v_i64m4_f64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m8_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vreinterpret_v_i64m8_f64m8(vint64m8_t src) { + return vreinterpret_v_i64m8_f64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m1_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vreinterpret_v_u64m1_f64m1(vuint64m1_t src) { + return vreinterpret_v_u64m1_f64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m2_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vreinterpret_v_u64m2_f64m2(vuint64m2_t src) { + return vreinterpret_v_u64m2_f64m2(src); +} + +// 
CHECK-RV32-LABEL: @test_vreinterpret_v_u64m4_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vreinterpret_v_u64m4_f64m4(vuint64m4_t src) { + return vreinterpret_v_u64m4_f64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m8_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vreinterpret_v_u64m8_f64m8(vuint64m8_t src) { + return vreinterpret_v_u64m8_f64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf4_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vreinterpret_v_i8mf4_i16mf4(vint8mf4_t src) { + return vreinterpret_v_i8mf4_i16mf4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf2_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vreinterpret_v_i8mf2_i16mf2(vint8mf2_t src) { + return vreinterpret_v_i8mf2_i16mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vreinterpret_v_i8m1_i16m1(vint8m1_t src) { + return vreinterpret_v_i8m1_i16m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m2_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vreinterpret_v_i8m2_i16m2(vint8m2_t src) { + return vreinterpret_v_i8m2_i16m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m4_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vreinterpret_v_i8m4_i16m4(vint8m4_t src) { + return vreinterpret_v_i8m4_i16m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m8_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vreinterpret_v_i8m8_i16m8(vint8m8_t src) { + return vreinterpret_v_i8m8_i16m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf4_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_u16mf4( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vreinterpret_v_u8mf4_u16mf4(vuint8mf4_t src) { + return vreinterpret_v_u8mf4_u16mf4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf2_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vreinterpret_v_u8mf2_u16mf2(vuint8mf2_t src) { + return vreinterpret_v_u8mf2_u16mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vreinterpret_v_u8m1_u16m1(vuint8m1_t src) { + return vreinterpret_v_u8m1_u16m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m2_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vreinterpret_v_u8m2_u16m2(vuint8m2_t src) { + return vreinterpret_v_u8m2_u16m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m4_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint16m4_t test_vreinterpret_v_u8m4_u16m4(vuint8m4_t src) { + return vreinterpret_v_u8m4_u16m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m8_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vreinterpret_v_u8m8_u16m8(vuint8m8_t src) { + return vreinterpret_v_u8m8_u16m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8mf2_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vreinterpret_v_i8mf2_i32mf2(vint8mf2_t src) { + return vreinterpret_v_i8mf2_i32mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vreinterpret_v_i8m1_i32m1(vint8m1_t src) { + return vreinterpret_v_i8m1_i32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m2_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vreinterpret_v_i8m2_i32m2(vint8m2_t src) { + return vreinterpret_v_i8m2_i32m2(src); +} + 
+// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m4_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vreinterpret_v_i8m4_i32m4(vint8m4_t src) { + return vreinterpret_v_i8m4_i32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m8_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vreinterpret_v_i8m8_i32m8(vint8m8_t src) { + return vreinterpret_v_i8m8_i32m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8mf2_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vreinterpret_v_u8mf2_u32mf2(vuint8mf2_t src) { + return vreinterpret_v_u8mf2_u32mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vreinterpret_v_u8m1_u32m1(vuint8m1_t src) { + return vreinterpret_v_u8m1_u32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m2_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vreinterpret_v_u8m2_u32m2(vuint8m2_t src) { + return vreinterpret_v_u8m2_u32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m4_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vreinterpret_v_u8m4_u32m4(vuint8m4_t src) { + return vreinterpret_v_u8m4_u32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m8_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vreinterpret_v_u8m8_u32m8(vuint8m8_t src) { + return vreinterpret_v_u8m8_u32m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vreinterpret_v_i8m1_i64m1(vint8m1_t src) { + return vreinterpret_v_i8m1_i64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m2_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i64m2( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vreinterpret_v_i8m2_i64m2(vint8m2_t src) { + return vreinterpret_v_i8m2_i64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m4_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vreinterpret_v_i8m4_i64m4(vint8m4_t src) { + return vreinterpret_v_i8m4_i64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i8m8_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vreinterpret_v_i8m8_i64m8(vint8m8_t src) { + return vreinterpret_v_i8m8_i64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vreinterpret_v_u8m1_u64m1(vuint8m1_t src) { + return vreinterpret_v_u8m1_u64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m2_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t 
test_vreinterpret_v_u8m2_u64m2(vuint8m2_t src) { + return vreinterpret_v_u8m2_u64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m4_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vreinterpret_v_u8m4_u64m4(vuint8m4_t src) { + return vreinterpret_v_u8m4_u64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u8m8_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vreinterpret_v_u8m8_u64m8(vuint8m8_t src) { + return vreinterpret_v_u8m8_u64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16mf4_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vreinterpret_v_i16mf4_i8mf4(vint16mf4_t src) { + return vreinterpret_v_i16mf4_i8mf4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16mf2_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vreinterpret_v_i16mf2_i8mf2(vint16mf2_t src) { + return vreinterpret_v_i16mf2_i8mf2(src); +} + +// 
CHECK-RV32-LABEL: @test_vreinterpret_v_i16m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vreinterpret_v_i16m1_i8m1(vint16m1_t src) { + return vreinterpret_v_i16m1_i8m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m2_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vreinterpret_v_i16m2_i8m2(vint16m2_t src) { + return vreinterpret_v_i16m2_i8m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m4_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vreinterpret_v_i16m4_i8m4(vint16m4_t src) { + return vreinterpret_v_i16m4_i8m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m8_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vreinterpret_v_i16m8_i8m8(vint16m8_t src) { + return vreinterpret_v_i16m8_i8m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16mf4_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast 
[[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vreinterpret_v_u16mf4_u8mf4(vuint16mf4_t src) { + return vreinterpret_v_u16mf4_u8mf4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16mf2_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vreinterpret_v_u16mf2_u8mf2(vuint16mf2_t src) { + return vreinterpret_v_u16mf2_u8mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vreinterpret_v_u16m1_u8m1(vuint16m1_t src) { + return vreinterpret_v_u16m1_u8m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m2_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vreinterpret_v_u16m2_u8m2(vuint16m2_t src) { + return vreinterpret_v_u16m2_u8m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m4_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u8m4( 
+// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vreinterpret_v_u16m4_u8m4(vuint16m4_t src) { + return vreinterpret_v_u16m4_u8m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m8_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vreinterpret_v_u16m8_u8m8(vuint16m8_t src) { + return vreinterpret_v_u16m8_u8m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16mf2_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vreinterpret_v_i16mf2_i32mf2(vint16mf2_t src) { + return vreinterpret_v_i16mf2_i32mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vreinterpret_v_i16m1_i32m1(vint16m1_t src) { + return vreinterpret_v_i16m1_i32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m2_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vint32m2_t test_vreinterpret_v_i16m2_i32m2(vint16m2_t src) { + return vreinterpret_v_i16m2_i32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m4_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vreinterpret_v_i16m4_i32m4(vint16m4_t src) { + return vreinterpret_v_i16m4_i32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m8_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vreinterpret_v_i16m8_i32m8(vint16m8_t src) { + return vreinterpret_v_i16m8_i32m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16mf2_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vreinterpret_v_u16mf2_u32mf2(vuint16mf2_t src) { + return vreinterpret_v_u16mf2_u32mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vreinterpret_v_u16m1_u32m1(vuint16m1_t src) { + return 
vreinterpret_v_u16m1_u32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m2_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vreinterpret_v_u16m2_u32m2(vuint16m2_t src) { + return vreinterpret_v_u16m2_u32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m4_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vreinterpret_v_u16m4_u32m4(vuint16m4_t src) { + return vreinterpret_v_u16m4_u32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m8_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vreinterpret_v_u16m8_u32m8(vuint16m8_t src) { + return vreinterpret_v_u16m8_u32m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vreinterpret_v_i16m1_i64m1(vint16m1_t src) { + return vreinterpret_v_i16m1_i64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m2_i64m2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vreinterpret_v_i16m2_i64m2(vint16m2_t src) { + return vreinterpret_v_i16m2_i64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m4_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vreinterpret_v_i16m4_i64m4(vint16m4_t src) { + return vreinterpret_v_i16m4_i64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i16m8_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vreinterpret_v_i16m8_i64m8(vint16m8_t src) { + return vreinterpret_v_i16m8_i64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vreinterpret_v_u16m1_u64m1(vuint16m1_t src) { + return vreinterpret_v_u16m1_u64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m2_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vreinterpret_v_u16m2_u64m2(vuint16m2_t src) { + return vreinterpret_v_u16m2_u64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m4_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vreinterpret_v_u16m4_u64m4(vuint16m4_t src) { + return vreinterpret_v_u16m4_u64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u16m8_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vreinterpret_v_u16m8_u64m8(vuint16m8_t src) { + return vreinterpret_v_u16m8_u64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32mf2_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vreinterpret_v_i32mf2_i8mf2(vint32mf2_t src) { + return vreinterpret_v_i32mf2_i8mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i8m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vreinterpret_v_i32m1_i8m1(vint32m1_t src) { + return vreinterpret_v_i32m1_i8m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m2_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vreinterpret_v_i32m2_i8m2(vint32m2_t src) { + return vreinterpret_v_i32m2_i8m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m4_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vreinterpret_v_i32m4_i8m4(vint32m4_t src) { + return vreinterpret_v_i32m4_i8m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m8_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vreinterpret_v_i32m8_i8m8(vint32m8_t src) { + return vreinterpret_v_i32m8_i8m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32mf2_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t 
test_vreinterpret_v_u32mf2_u8mf2(vuint32mf2_t src) { + return vreinterpret_v_u32mf2_u8mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vreinterpret_v_u32m1_u8m1(vuint32m1_t src) { + return vreinterpret_v_u32m1_u8m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m2_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vreinterpret_v_u32m2_u8m2(vuint32m2_t src) { + return vreinterpret_v_u32m2_u8m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m4_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vreinterpret_v_u32m4_u8m4(vuint32m4_t src) { + return vreinterpret_v_u32m4_u8m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m8_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vreinterpret_v_u32m8_u8m8(vuint32m8_t src) { + return vreinterpret_v_u32m8_u8m8(src); +} + +// CHECK-RV32-LABEL: 
@test_vreinterpret_v_i32mf2_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vreinterpret_v_i32mf2_i16mf2(vint32mf2_t src) { + return vreinterpret_v_i32mf2_i16mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vreinterpret_v_i32m1_i16m1(vint32m1_t src) { + return vreinterpret_v_i32m1_i16m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m2_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vreinterpret_v_i32m2_i16m2(vint32m2_t src) { + return vreinterpret_v_i32m2_i16m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m4_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vreinterpret_v_i32m4_i16m4(vint32m4_t src) { + return vreinterpret_v_i32m4_i16m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m8_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast 
[[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vreinterpret_v_i32m8_i16m8(vint32m8_t src) { + return vreinterpret_v_i32m8_i16m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32mf2_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vreinterpret_v_u32mf2_u16mf2(vuint32mf2_t src) { + return vreinterpret_v_u32mf2_u16mf2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vreinterpret_v_u32m1_u16m1(vuint32m1_t src) { + return vreinterpret_v_u32m1_u16m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m2_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vreinterpret_v_u32m2_u16m2(vuint32m2_t src) { + return vreinterpret_v_u32m2_u16m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m4_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vreinterpret_v_u32m4_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vreinterpret_v_u32m4_u16m4(vuint32m4_t src) { + return vreinterpret_v_u32m4_u16m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m8_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vreinterpret_v_u32m8_u16m8(vuint32m8_t src) { + return vreinterpret_v_u32m8_u16m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m1_i64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vreinterpret_v_i32m1_i64m1(vint32m1_t src) { + return vreinterpret_v_i32m1_i64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m2_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vreinterpret_v_i32m2_i64m2(vint32m2_t src) { + return vreinterpret_v_i32m2_i64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m4_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast 
[[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vreinterpret_v_i32m4_i64m4(vint32m4_t src) { + return vreinterpret_v_i32m4_i64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i32m8_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vreinterpret_v_i32m8_i64m8(vint32m8_t src) { + return vreinterpret_v_i32m8_i64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m1_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vreinterpret_v_u32m1_u64m1(vuint32m1_t src) { + return vreinterpret_v_u32m1_u64m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m2_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vreinterpret_v_u32m2_u64m2(vuint32m2_t src) { + return vreinterpret_v_u32m2_u64m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m4_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vreinterpret_v_u32m4_u64m4(vuint32m4_t 
src) { + return vreinterpret_v_u32m4_u64m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u32m8_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vreinterpret_v_u32m8_u64m8(vuint32m8_t src) { + return vreinterpret_v_u32m8_u64m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m1_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vreinterpret_v_i64m1_i8m1(vint64m1_t src) { + return vreinterpret_v_i64m1_i8m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m2_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vreinterpret_v_i64m2_i8m2(vint64m2_t src) { + return vreinterpret_v_i64m2_i8m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m4_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vreinterpret_v_i64m4_i8m4(vint64m4_t src) { + return vreinterpret_v_i64m4_i8m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m8_i8m8( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vreinterpret_v_i64m8_i8m8(vint64m8_t src) { + return vreinterpret_v_i64m8_i8m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m1_u8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vreinterpret_v_u64m1_u8m1(vuint64m1_t src) { + return vreinterpret_v_u64m1_u8m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m2_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vreinterpret_v_u64m2_u8m2(vuint64m2_t src) { + return vreinterpret_v_u64m2_u8m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m4_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vreinterpret_v_u64m4_u8m4(vuint64m4_t src) { + return vreinterpret_v_u64m4_u8m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m8_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// 
CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vreinterpret_v_u64m8_u8m8(vuint64m8_t src) { + return vreinterpret_v_u64m8_u8m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m1_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vreinterpret_v_i64m1_i16m1(vint64m1_t src) { + return vreinterpret_v_i64m1_i16m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m2_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vreinterpret_v_i64m2_i16m2(vint64m2_t src) { + return vreinterpret_v_i64m2_i16m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m4_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vreinterpret_v_i64m4_i16m4(vint64m4_t src) { + return vreinterpret_v_i64m4_i16m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m8_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vreinterpret_v_i64m8_i16m8(vint64m8_t src) { + return vreinterpret_v_i64m8_i16m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m1_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vreinterpret_v_u64m1_u16m1(vuint64m1_t src) { + return vreinterpret_v_u64m1_u16m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m2_u16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vreinterpret_v_u64m2_u16m2(vuint64m2_t src) { + return vreinterpret_v_u64m2_u16m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m4_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vreinterpret_v_u64m4_u16m4(vuint64m4_t src) { + return vreinterpret_v_u64m4_u16m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m8_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t 
test_vreinterpret_v_u64m8_u16m8(vuint64m8_t src) { + return vreinterpret_v_u64m8_u16m8(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m1_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vreinterpret_v_i64m1_i32m1(vint64m1_t src) { + return vreinterpret_v_i64m1_i32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m2_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vreinterpret_v_i64m2_i32m2(vint64m2_t src) { + return vreinterpret_v_i64m2_i32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m4_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vreinterpret_v_i64m4_i32m4(vint64m4_t src) { + return vreinterpret_v_i64m4_i32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_i64m8_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vreinterpret_v_i64m8_i32m8(vint64m8_t src) { + return vreinterpret_v_i64m8_i32m8(src); +} + +// 
CHECK-RV32-LABEL: @test_vreinterpret_v_u64m1_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vreinterpret_v_u64m1_u32m1(vuint64m1_t src) { + return vreinterpret_v_u64m1_u32m1(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m2_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vreinterpret_v_u64m2_u32m2(vuint64m2_t src) { + return vreinterpret_v_u64m2_u32m2(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m4_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) { + return vreinterpret_v_u64m4_u32m4(src); +} + +// CHECK-RV32-LABEL: @test_vreinterpret_v_u64m8_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = bitcast [[SRC:%.*]] to +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vreinterpret_v_u64m8_u32m8(vuint64m8_t src) { + return vreinterpret_v_u64m8_u32m8(src); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vundefined.c @@ -0,0 +1,538 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV32-LABEL: @test_vundefined_i8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 1 x i8> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 1 x i8> undef +// +vint8mf8_t test_vundefined_i8mf8() { return vundefined_i8mf8(); } + +// CHECK-RV32-LABEL: @test_vundefined_i8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 2 x i8> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 2 x i8> undef +// +vint8mf4_t test_vundefined_i8mf4() { return vundefined_i8mf4(); } + +// CHECK-RV32-LABEL: @test_vundefined_i8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 4 x i8> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 4 x i8> undef +// +vint8mf2_t test_vundefined_i8mf2() { return vundefined_i8mf2(); } + +// CHECK-RV32-LABEL: @test_vundefined_i8m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 8 x i8> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 8 x i8> undef +// +vint8m1_t test_vundefined_i8m1() { return vundefined_i8m1(); } + +// CHECK-RV32-LABEL: @test_vundefined_i8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 16 x i8> undef +// +// 
CHECK-RV64-LABEL: @test_vundefined_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 16 x i8> undef +// +vint8m2_t test_vundefined_i8m2() { return vundefined_i8m2(); } + +// CHECK-RV32-LABEL: @test_vundefined_i8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 32 x i8> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 32 x i8> undef +// +vint8m4_t test_vundefined_i8m4() { return vundefined_i8m4(); } + +// CHECK-RV32-LABEL: @test_vundefined_i8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 64 x i8> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 64 x i8> undef +// +vint8m8_t test_vundefined_i8m8() { return vundefined_i8m8(); } + +// CHECK-RV32-LABEL: @test_vundefined_i16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 1 x i16> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 1 x i16> undef +// +vint16mf4_t test_vundefined_i16mf4() { return vundefined_i16mf4(); } + +// CHECK-RV32-LABEL: @test_vundefined_i16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 2 x i16> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 2 x i16> undef +// +vint16mf2_t test_vundefined_i16mf2() { return vundefined_i16mf2(); } + +// CHECK-RV32-LABEL: @test_vundefined_i16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 4 x i16> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 4 x i16> undef +// +vint16m1_t test_vundefined_i16m1() { return vundefined_i16m1(); } + +// CHECK-RV32-LABEL: @test_vundefined_i16m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 8 x i16> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 8 x i16> undef +// +vint16m2_t test_vundefined_i16m2() { return vundefined_i16m2(); } + +// CHECK-RV32-LABEL: @test_vundefined_i16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 16 x i16> undef 
+// +// CHECK-RV64-LABEL: @test_vundefined_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 16 x i16> undef +// +vint16m4_t test_vundefined_i16m4() { return vundefined_i16m4(); } + +// CHECK-RV32-LABEL: @test_vundefined_i16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 32 x i16> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 32 x i16> undef +// +vint16m8_t test_vundefined_i16m8() { return vundefined_i16m8(); } + +// CHECK-RV32-LABEL: @test_vundefined_i32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 1 x i32> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 1 x i32> undef +// +vint32mf2_t test_vundefined_i32mf2() { return vundefined_i32mf2(); } + +// CHECK-RV32-LABEL: @test_vundefined_i32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 2 x i32> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 2 x i32> undef +// +vint32m1_t test_vundefined_i32m1() { return vundefined_i32m1(); } + +// CHECK-RV32-LABEL: @test_vundefined_i32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 4 x i32> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 4 x i32> undef +// +vint32m2_t test_vundefined_i32m2() { return vundefined_i32m2(); } + +// CHECK-RV32-LABEL: @test_vundefined_i32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 8 x i32> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 8 x i32> undef +// +vint32m4_t test_vundefined_i32m4() { return vundefined_i32m4(); } + +// CHECK-RV32-LABEL: @test_vundefined_i32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 16 x i32> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 16 x i32> undef +// +vint32m8_t test_vundefined_i32m8() { return vundefined_i32m8(); } + +// CHECK-RV32-LABEL: @test_vundefined_i64m1( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: ret <vscale x 1 x i64> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 1 x i64> undef +// +vint64m1_t test_vundefined_i64m1() { return vundefined_i64m1(); } + +// CHECK-RV32-LABEL: @test_vundefined_i64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 2 x i64> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 2 x i64> undef +// +vint64m2_t test_vundefined_i64m2() { return vundefined_i64m2(); } + +// CHECK-RV32-LABEL: @test_vundefined_i64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 4 x i64> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 4 x i64> undef +// +vint64m4_t test_vundefined_i64m4() { return vundefined_i64m4(); } + +// CHECK-RV32-LABEL: @test_vundefined_i64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 8 x i64> undef +// +// CHECK-RV64-LABEL: @test_vundefined_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 8 x i64> undef +// +vint64m8_t test_vundefined_i64m8() { return vundefined_i64m8(); } + +// CHECK-RV32-LABEL: @test_vundefined_u8mf8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 1 x i8> undef +// +// CHECK-RV64-LABEL: @test_vundefined_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 1 x i8> undef +// +vuint8mf8_t test_vundefined_u8mf8() { return vundefined_u8mf8(); } + +// CHECK-RV32-LABEL: @test_vundefined_u8mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 2 x i8> undef +// +// CHECK-RV64-LABEL: @test_vundefined_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 2 x i8> undef +// +vuint8mf4_t test_vundefined_u8mf4() { return vundefined_u8mf4(); } + +// CHECK-RV32-LABEL: @test_vundefined_u8mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret <vscale x 4 x i8> undef +// +// CHECK-RV64-LABEL: @test_vundefined_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret <vscale x 4 x i8> undef +// +vuint8mf2_t test_vundefined_u8mf2() { return vundefined_u8mf2(); } + +// CHECK-RV32-LABEL: @test_vundefined_u8m1( +// CHECK-RV32-NEXT: 
entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint8m1_t test_vundefined_u8m1() { return vundefined_u8m1(); } + +// CHECK-RV32-LABEL: @test_vundefined_u8m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint8m2_t test_vundefined_u8m2() { return vundefined_u8m2(); } + +// CHECK-RV32-LABEL: @test_vundefined_u8m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint8m4_t test_vundefined_u8m4() { return vundefined_u8m4(); } + +// CHECK-RV32-LABEL: @test_vundefined_u8m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u8m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint8m8_t test_vundefined_u8m8() { return vundefined_u8m8(); } + +// CHECK-RV32-LABEL: @test_vundefined_u16mf4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint16mf4_t test_vundefined_u16mf4() { return vundefined_u16mf4(); } + +// CHECK-RV32-LABEL: @test_vundefined_u16mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint16mf2_t test_vundefined_u16mf2() { return vundefined_u16mf2(); } + +// CHECK-RV32-LABEL: @test_vundefined_u16m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint16m1_t test_vundefined_u16m1() { return vundefined_u16m1(); } + +// CHECK-RV32-LABEL: @test_vundefined_u16m2( +// 
CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint16m2_t test_vundefined_u16m2() { return vundefined_u16m2(); } + +// CHECK-RV32-LABEL: @test_vundefined_u16m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint16m4_t test_vundefined_u16m4() { return vundefined_u16m4(); } + +// CHECK-RV32-LABEL: @test_vundefined_u16m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint16m8_t test_vundefined_u16m8() { return vundefined_u16m8(); } + +// CHECK-RV32-LABEL: @test_vundefined_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint32mf2_t test_vundefined_u32mf2() { return vundefined_u32mf2(); } + +// CHECK-RV32-LABEL: @test_vundefined_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint32m1_t test_vundefined_u32m1() { return vundefined_u32m1(); } + +// CHECK-RV32-LABEL: @test_vundefined_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint32m2_t test_vundefined_u32m2() { return vundefined_u32m2(); } + +// CHECK-RV32-LABEL: @test_vundefined_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint32m4_t test_vundefined_u32m4() { return vundefined_u32m4(); } + +// CHECK-RV32-LABEL: 
@test_vundefined_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint32m8_t test_vundefined_u32m8() { return vundefined_u32m8(); } + +// CHECK-RV32-LABEL: @test_vundefined_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint64m1_t test_vundefined_u64m1() { return vundefined_u64m1(); } + +// CHECK-RV32-LABEL: @test_vundefined_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint64m2_t test_vundefined_u64m2() { return vundefined_u64m2(); } + +// CHECK-RV32-LABEL: @test_vundefined_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint64m4_t test_vundefined_u64m4() { return vundefined_u64m4(); } + +// CHECK-RV32-LABEL: @test_vundefined_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vuint64m8_t test_vundefined_u64m8() { return vundefined_u64m8(); } + +// CHECK-RV32-LABEL: @test_vundefined_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vfloat32mf2_t test_vundefined_f32mf2() { return vundefined_f32mf2(); } + +// CHECK-RV32-LABEL: @test_vundefined_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vfloat32m1_t test_vundefined_f32m1() { return vundefined_f32m1(); } 
+ +// CHECK-RV32-LABEL: @test_vundefined_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vfloat32m2_t test_vundefined_f32m2() { return vundefined_f32m2(); } + +// CHECK-RV32-LABEL: @test_vundefined_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vfloat32m4_t test_vundefined_f32m4() { return vundefined_f32m4(); } + +// CHECK-RV32-LABEL: @test_vundefined_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vfloat32m8_t test_vundefined_f32m8() { return vundefined_f32m8(); } + +// CHECK-RV32-LABEL: @test_vundefined_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vfloat64m1_t test_vundefined_f64m1() { return vundefined_f64m1(); } + +// CHECK-RV32-LABEL: @test_vundefined_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vfloat64m2_t test_vundefined_f64m2() { return vundefined_f64m2(); } + +// CHECK-RV32-LABEL: @test_vundefined_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vfloat64m4_t test_vundefined_f64m4() { return vundefined_f64m4(); } + +// CHECK-RV32-LABEL: @test_vundefined_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: ret undef +// +// CHECK-RV64-LABEL: @test_vundefined_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: ret undef +// +vfloat64m8_t test_vundefined_f64m8() { 
return vundefined_f64m8(); } diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -31,11 +31,8 @@ namespace { // Exponential LMUL -class LMULType { -private: +struct LMULType { int Log2LMUL; - -public: LMULType(int Log2LMUL); // Return the C/C++ string representation of LMUL std::string str() const; @@ -188,6 +185,7 @@ bool isMask() const { return IsMask; } size_t getNumOperand() const { return InputTypes.size(); } StringRef getIRName() const { return IRName; } + StringRef getManualCodegen() const { return ManualCodegen; } uint8_t getRISCVExtensions() const { return RISCVExtensions; } // Return the type string for a BUILTIN() macro in Builtins.def. @@ -655,6 +653,20 @@ PrintFatalError( "Illegal type transformer for Complex type transformer"); }; + auto ComputeFixedLog2LMUL = + [&](StringRef Value, + std::function<bool(int32_t, int32_t)> Compare) { + int32_t Log2LMUL; + Value.getAsInteger(10, Log2LMUL); + if (!Compare(Log2LMUL, LMUL.Log2LMUL)) { + ScalarType = Invalid; + return false; + } + // Update new LMUL + LMUL = LMULType(Log2LMUL); + UpdateAndCheckComplexProto(); + return true; + }; auto ComplexTT = ComplexType.split(":"); if (ComplexTT.first == "Log2EEW") { uint32_t Log2EEW; @@ -665,6 +677,25 @@ ElementBitwidth = 1 << Log2EEW; ScalarType = ScalarTypeKind::SignedInteger; UpdateAndCheckComplexProto(); + } else if (ComplexTT.first == "FixedSEW") { + uint32_t NewSEW; + ComplexTT.second.getAsInteger(10, NewSEW); + // Set invalid type if src and dst SEW are the same.
+ if (ElementBitwidth == NewSEW) { + ScalarType = Invalid; + return; + } + // Update new SEW + ElementBitwidth = NewSEW; + UpdateAndCheckComplexProto(); + } else if (ComplexTT.first == "LFixedLog2LMUL") { + // New LMUL should be larger than old + if (!ComputeFixedLog2LMUL(ComplexTT.second, std::greater<int32_t>())) + return; + } else if (ComplexTT.first == "SFixedLog2LMUL") { + // New LMUL should be smaller than old + if (!ComputeFixedLog2LMUL(ComplexTT.second, std::less<int32_t>())) + return; } else { PrintFatalError("Illegal complex type transformers!"); } @@ -806,8 +837,8 @@ } void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const { - - OS << " ID = Intrinsic::riscv_" + getIRName() + ";\n"; + if (!getIRName().empty()) + OS << " ID = Intrinsic::riscv_" + getIRName() + ";\n"; if (hasManualCodegen()) { OS << ManualCodegen; OS << "break;\n"; } @@ -1007,18 +1038,20 @@ void RVVEmitter::createCodeGen(raw_ostream &OS) { std::vector<std::unique_ptr<RVVIntrinsic>> Defs; createRVVIntrinsics(Defs); - - // The same intrinsic IR name has the same switch body. + // IR name could be empty; using a stable sort preserves the relative order. std::stable_sort(Defs.begin(), Defs.end(), [](const std::unique_ptr<RVVIntrinsic> &A, const std::unique_ptr<RVVIntrinsic> &B) { return A->getIRName() < B->getIRName(); }); - // Print switch body when the ir name changes from previous iteration. + // Print switch body when the IR name or ManualCodegen changes from previous + // iteration. RVVIntrinsic *PrevDef = Defs.begin()->get(); for (auto &Def : Defs) { StringRef CurIRName = Def->getIRName(); - if (CurIRName != PrevDef->getIRName()) { + if (CurIRName != PrevDef->getIRName() || + (CurIRName.empty() && + Def->getManualCodegen() != PrevDef->getManualCodegen())) { PrevDef->emitCodeGenSwitchBody(OS); } PrevDef = Def.get();