diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2168,12 +2168,12 @@
   defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil",
                                 [["s", "ve", "ev"],
                                  ["s", "UvUe", "UeUv"]]>;
-  let OverloadedName = "vmv_s", IsPrototypeDefaultTU = true,
+  let OverloadedName = "vmv_s", IsPrototypeDefaultTU = false,
       UnMaskedPolicyScheme = HasPassthruOperand,
       SupportOverloading = false in
     defm vmv_s : RVVOutBuiltinSet<"vmv_s_x", "csil",
-                                  [["x", "v", "vve"],
-                                   ["x", "Uv", "UvUvUe"]]>;
+                                  [["x", "v", "ve"],
+                                   ["x", "Uv", "UvUe"]]>;
 }

 // 17.2. Floating-Point Scalar Move Instructions
@@ -2181,12 +2181,12 @@
 let HasVL = false, OverloadedName = "vfmv_f" in
   defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd",
                                  [["s", "ve", "ev"]]>;
-let OverloadedName = "vfmv_s", IsPrototypeDefaultTU = true,
+let OverloadedName = "vfmv_s", IsPrototypeDefaultTU = false,
     UnMaskedPolicyScheme = HasPassthruOperand,
     SupportOverloading = false in
   defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "xfd",
-                                 [["f", "v", "vve"],
-                                  ["x", "Uv", "UvUvUe"]]>;
+                                 [["f", "v", "ve"],
+                                  ["x", "Uv", "UvUe"]]>;
 }

 // 17.3. Vector Slide Instructions
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmv.c
@@ -1,72 +1,99 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-// CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf4_f16(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> [[SRC:%.*]])
-// CHECK-RV64-NEXT:    ret float [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
 //
-float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) { return vfmv_f(src); }
+_Float16 test_vfmv_f_s_f16mf4_f16 (vfloat16mf4_t src) {
+  return vfmv_f(src);
+}

-// CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
 //
-vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dst, float src, size_t vl) {
-  return vfmv_s(dst, src, vl);
+_Float16 test_vfmv_f_s_f16mf2_f16 (vfloat16mf2_t src) {
+  return vfmv_f(src);
 }

-// CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float> [[SRC:%.*]])
-// CHECK-RV64-NEXT:    ret float [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
 //
-float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) { return vfmv_f(src); }
+_Float16 test_vfmv_f_s_f16m1_f16 (vfloat16m1_t src) {
+  return vfmv_f(src);
+}

-// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
 //
-vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dst, float src, size_t vl) {
-  return vfmv_s(dst, src, vl);
+_Float16 test_vfmv_f_s_f16m2_f16 (vfloat16m2_t src) {
+  return vfmv_f(src);
 }

-// CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
+//
+_Float16 test_vfmv_f_s_f16m4_f16 (vfloat16m4_t src) {
+  return vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret half [[TMP0]]
+//
+_Float16 test_vfmv_f_s_f16m8_f16 (vfloat16m8_t src) {
+  return vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret float [[TMP0]]
 //
-float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) { return vfmv_f(src); }
+float test_vfmv_f_s_f32mf2_f32 (vfloat32mf2_t src) {
+  return vfmv_f(src);
+}

-// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret float [[TMP0]]
 //
-vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dst, float src, size_t vl) {
-  return vfmv_s(dst, src, vl);
+float test_vfmv_f_s_f32m1_f32 (vfloat32m1_t src) {
+  return vfmv_f(src);
 }

-// CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret float [[TMP0]]
 //
-float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) { return vfmv_f(src); }
+float test_vfmv_f_s_f32m2_f32 (vfloat32m2_t src) {
+  return vfmv_f(src);
+}

-// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret float [[TMP0]]
 //
-vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dst, float src, size_t vl) {
-  return vfmv_s(dst, src, vl);
+float test_vfmv_f_s_f32m4_f32 (vfloat32m4_t src) {
+  return vfmv_f(src);
 }

 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m8_f32(
@@ -74,95 +101,312 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret float [[TMP0]]
 //
-float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) { return vfmv_f(src); }
+float test_vfmv_f_s_f32m8_f32 (vfloat32m8_t src) {
+  return vfmv_f(src);
+}

-// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret double [[TMP0]]
 //
-vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dst, float src, size_t vl) {
-  return vfmv_s(dst, src, vl);
+double test_vfmv_f_s_f64m1_f64 (vfloat64m1_t src) {
+  return vfmv_f(src);
 }

-// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret double [[TMP0]]
 //
-double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) { return vfmv_f(src); }
+double test_vfmv_f_s_f64m2_f64 (vfloat64m2_t src) {
+  return vfmv_f(src);
+}

-// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    ret double [[TMP0]]
 //
-vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dst, double src, size_t vl) {
-  return vfmv_s(dst, src, vl);
+double test_vfmv_f_s_f64m4_f64 (vfloat64m4_t src) {
+  return vfmv_f(src);
 }

-// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64(
+// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double> [[SRC:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret double [[TMP0]]
 //
-double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) { return vfmv_f(src); }
+double test_vfmv_f_s_f64m8_f64 (vfloat64m8_t src) {
+  return vfmv_f(src);
+}

-// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2(
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4_tu(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dst, double src, size_t vl) {
-  return vfmv_s(dst, src, vl);
+vfloat16mf4_t test_vfmv_v_f_f16mf4_tu (vfloat16mf4_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
 }

-// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64(
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2_tu(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double> [[SRC:%.*]])
-// CHECK-RV64-NEXT:    ret double [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) { return vfmv_f(src); }
+vfloat16mf2_t test_vfmv_v_f_f16mf2_tu (vfloat16mf2_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}

-// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4(
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1_tu(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dst, double src, size_t vl) {
-  return vfmv_s(dst, src, vl);
+vfloat16m1_t test_vfmv_v_f_f16m1_tu (vfloat16m1_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
 }

-// CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64(
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2_tu(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> [[SRC:%.*]])
-// CHECK-RV64-NEXT:    ret double [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) { return vfmv_f(src); }
+vfloat16m2_t test_vfmv_v_f_f16m2_tu (vfloat16m2_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}

-// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8(
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4_tu(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dst, double src, size_t vl) {
-  return vfmv_s(dst, src, vl);
+vfloat16m4_t test_vfmv_v_f_f16m4_tu (vfloat16m4_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmv_v_f_f16m8_tu (vfloat16m8_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2_tu(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfmv_v_f_f32mf2_tu(vfloat32mf2_t merge, float src, size_t vl) {
-  return vfmv_v_tu(merge, src, vl);
+vfloat32mf2_t test_vfmv_v_f_f32mf2_tu (vfloat32mf2_t maskedoff, float src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmv_v_f_f32m1_tu (vfloat32m1_t maskedoff, float src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmv_v_f_f32m2_tu (vfloat32m2_t maskedoff, float src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmv_v_f_f32m4_tu (vfloat32m4_t maskedoff, float src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmv_v_f_f32m8_tu (vfloat32m8_t maskedoff, float src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmv_v_f_f64m1_tu (vfloat64m1_t maskedoff, double src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmv_v_f_f64m2_tu (vfloat64m2_t maskedoff, double src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmv_v_f_f64m4_tu (vfloat64m4_t maskedoff, double src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmv_v_f_f64m8_tu (vfloat64m8_t maskedoff, double src, size_t vl) {
+  return vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmv_s_f_f16mf4_tu (vfloat16mf4_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmv_s_f_f16mf2_tu (vfloat16mf2_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmv_s_f_f16m1_tu (vfloat16m1_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmv_s_f_f16m2_tu (vfloat16m2_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmv_s_f_f16m4_tu (vfloat16m4_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmv_s_f_f16m8_tu (vfloat16m8_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
 }

 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2_tu(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfmv_s_f_f32mf2_tu(vfloat32mf2_t merge, float src, size_t vl) {
-  return vfmv_s_tu(merge, src, vl);
+vfloat32mf2_t test_vfmv_s_f_f32mf2_tu (vfloat32mf2_t maskedoff, float src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmv_s_f_f32m1_tu (vfloat32m1_t maskedoff, float src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmv_s_f_f32m2_tu (vfloat32m2_t maskedoff, float src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmv_s_f_f32m4_tu (vfloat32m4_t maskedoff, float src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmv_s_f_f32m8_tu (vfloat32m8_t maskedoff, float src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmv_s_f_f64m1_tu (vfloat64m1_t maskedoff, double src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmv_s_f_f64m2_tu (vfloat64m2_t maskedoff, double src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmv_s_f_f64m4_tu (vfloat64m4_t maskedoff, double src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmv_s_f_f64m8_tu (vfloat64m8_t maskedoff, double src, size_t vl) {
+  return vfmv_s_tu(maskedoff, src, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmv.c
@@ -1,6 +1,7 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +v -target-feature +zfh -target-feature +experimental-zvfh \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>
@@ -10,7 +11,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
+vint8mf8_t test_vmv_v_v_i8mf8 (vint8mf8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -19,7 +20,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
+vint8mf4_t test_vmv_v_v_i8mf4 (vint8mf4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -28,7 +29,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
+vint8mf2_t test_vmv_v_v_i8mf2 (vint8mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -37,35 +38,43 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) { return vmv_v(src, vl); }
+vint8m1_t test_vmv_v_v_i8m1 (vint8m1_t src, size_t vl) {
+  return vmv_v(src, vl);
+}

 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) { return vmv_v(src, vl); }
+vint8m2_t test_vmv_v_v_i8m2 (vint8m2_t src, size_t vl) {
+  return vmv_v(src, vl);
+}

 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) { return vmv_v(src, vl); }
+vint8m4_t test_vmv_v_v_i8m4 (vint8m4_t src, size_t vl) {
+  return vmv_v(src, vl);
+}

 // CHECK-RV64-LABEL: @test_vmv_v_v_i8m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) { return vmv_v(src, vl); }
+vint8m8_t test_vmv_v_v_i8m8 (vint8m8_t src, size_t vl) {
+  return vmv_v(src, vl);
+}

 // CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
+vint16mf4_t test_vmv_v_v_i16mf4 (vint16mf4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -74,7 +83,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
+vint16mf2_t test_vmv_v_v_i16mf2 (vint16mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -83,7 +92,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
+vint16m1_t test_vmv_v_v_i16m1 (vint16m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -92,7 +101,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
+vint16m2_t test_vmv_v_v_i16m2 (vint16m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -101,7 +110,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
+vint16m4_t test_vmv_v_v_i16m4 (vint16m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -110,7 +119,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
+vint16m8_t test_vmv_v_v_i16m8 (vint16m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -119,7 +128,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
+vint32mf2_t test_vmv_v_v_i32mf2 (vint32mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -128,7 +137,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
+vint32m1_t test_vmv_v_v_i32m1 (vint32m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -137,7 +146,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
+vint32m2_t test_vmv_v_v_i32m2 (vint32m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -146,7 +155,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
+vint32m4_t test_vmv_v_v_i32m4 (vint32m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -155,7 +164,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
+vint32m8_t test_vmv_v_v_i32m8 (vint32m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -164,7 +173,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
+vint64m1_t test_vmv_v_v_i64m1 (vint64m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -173,7 +182,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
+vint64m2_t test_vmv_v_v_i64m2 (vint64m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -182,7 +191,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
+vint64m4_t test_vmv_v_v_i64m4 (vint64m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -191,7 +200,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
+vint64m8_t test_vmv_v_v_i64m8 (vint64m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -200,7 +209,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
+vuint8mf8_t test_vmv_v_v_u8mf8 (vuint8mf8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -209,7 +218,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
+vuint8mf4_t test_vmv_v_v_u8mf4 (vuint8mf4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -218,7 +227,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
+vuint8mf2_t test_vmv_v_v_u8mf2 (vuint8mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -227,7 +236,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
+vuint8m1_t test_vmv_v_v_u8m1 (vuint8m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -236,7 +245,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
+vuint8m2_t test_vmv_v_v_u8m2 (vuint8m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -245,7 +254,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
+vuint8m4_t test_vmv_v_v_u8m4 (vuint8m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -254,7 +263,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
+vuint8m8_t test_vmv_v_v_u8m8 (vuint8m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -263,7 +272,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
+vuint16mf4_t test_vmv_v_v_u16mf4 (vuint16mf4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -272,7 +281,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
+vuint16mf2_t test_vmv_v_v_u16mf2 (vuint16mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -281,7 +290,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
+vuint16m1_t test_vmv_v_v_u16m1 (vuint16m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -290,7 +299,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
+vuint16m2_t test_vmv_v_v_u16m2 (vuint16m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -299,7 +308,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
+vuint16m4_t test_vmv_v_v_u16m4 (vuint16m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -308,7 +317,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
+vuint16m8_t test_vmv_v_v_u16m8 (vuint16m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -317,7 +326,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
+vuint32mf2_t test_vmv_v_v_u32mf2 (vuint32mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -326,7 +335,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
+vuint32m1_t test_vmv_v_v_u32m1 (vuint32m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -335,7 +344,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
+vuint32m2_t test_vmv_v_v_u32m2 (vuint32m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -344,7 +353,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
+vuint32m4_t test_vmv_v_v_u32m4 (vuint32m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -353,7 +362,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
+vuint32m8_t test_vmv_v_v_u32m8 (vuint32m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -362,7 +371,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
+vuint64m1_t test_vmv_v_v_u64m1 (vuint64m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -371,7 +380,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
+vuint64m2_t test_vmv_v_v_u64m2 (vuint64m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -380,7 +389,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
+vuint64m4_t test_vmv_v_v_u64m4 (vuint64m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -389,7 +398,61 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
+vuint64m8_t test_vmv_v_v_u64m8 (vuint64m8_t src, size_t vl) {
+  return vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmv.v.v.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vmv_v_v_f16mf4 (vfloat16mf4_t src, size_t vl) {
+  return vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmv.v.v.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vmv_v_v_f16mf2 (vfloat16mf2_t src, size_t vl) {
+  return vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmv.v.v.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vmv_v_v_f16m1 (vfloat16m1_t src, size_t vl) {
+  return vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmv.v.v.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vmv_v_v_f16m2 (vfloat16m2_t src, size_t vl) {
+  return vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmv.v.v.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vmv_v_v_f16m4 (vfloat16m4_t src, size_t vl) {
+  return vmv_v(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmv.v.v.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vmv_v_v_f16m8 (vfloat16m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -398,7 +461,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
+vfloat32mf2_t test_vmv_v_v_f32mf2 (vfloat32mf2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -407,7 +470,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
+vfloat32m1_t test_vmv_v_v_f32m1 (vfloat32m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -416,7 +479,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
+vfloat32m2_t test_vmv_v_v_f32m2 (vfloat32m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -425,7 +488,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
+vfloat32m4_t test_vmv_v_v_f32m4 (vfloat32m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -434,7 +497,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
+vfloat32m8_t test_vmv_v_v_f32m8 (vfloat32m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -443,7 +506,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
+vfloat64m1_t test_vmv_v_v_f64m1 (vfloat64m1_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -452,7 +515,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
+vfloat64m2_t test_vmv_v_v_f64m2 (vfloat64m2_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -461,7 +524,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
+vfloat64m4_t test_vmv_v_v_f64m4 (vfloat64m4_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -470,7 +533,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
+vfloat64m8_t test_vmv_v_v_f64m8 (vfloat64m8_t src, size_t vl) {
   return vmv_v(src, vl);
 }

@@ -479,15 +542,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dst, int8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int8_t test_vmv_x_s_i8mf8_i8 (vint8mf8_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf4_i8(
@@ -495,15 +551,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-vint8mf4_t test_vmv_s_x_i8mf4(vint8mf4_t dst, int8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int8_t test_vmv_x_s_i8mf4_i8 (vint8mf4_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf2_i8(
@@ -511,15 +560,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-vint8mf2_t test_vmv_s_x_i8mf2(vint8mf2_t dst, int8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int8_t test_vmv_x_s_i8mf2_i8 (vint8mf2_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m1_i8(
@@ -527,15 +569,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dst, int8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int8_t test_vmv_x_s_i8m1_i8 (vint8m1_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m2_i8(
@@ -543,15 +578,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dst, int8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int8_t test_vmv_x_s_i8m2_i8 (vint8m2_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m4_i8(
@@ -559,15 +587,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dst, int8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int8_t test_vmv_x_s_i8m4_i8 (vint8m4_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m8_i8(
@@ -575,15 +596,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-vint8m8_t test_vmv_s_x_i8m8(vint8m8_t dst, int8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int8_t test_vmv_x_s_i8m8_i8 (vint8m8_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf4_i16(
@@ -591,15 +605,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i16 [[TMP0]]
 //
-int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-vint16mf4_t test_vmv_s_x_i16mf4(vint16mf4_t dst, int16_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int16_t test_vmv_x_s_i16mf4_i16 (vint16mf4_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf2_i16(
@@ -607,15 +614,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i16 [[TMP0]]
 //
-int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-vint16mf2_t test_vmv_s_x_i16mf2(vint16mf2_t dst, int16_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int16_t test_vmv_x_s_i16mf2_i16 (vint16mf2_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m1_i16(
@@ -623,15 +623,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i16 [[TMP0]]
 //
-int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-vint16m1_t test_vmv_s_x_i16m1(vint16m1_t dst, int16_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int16_t test_vmv_x_s_i16m1_i16 (vint16m1_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m2_i16(
@@ -639,15 +632,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i16 [[TMP0]]
 //
-int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-vint16m2_t test_vmv_s_x_i16m2(vint16m2_t dst, int16_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int16_t test_vmv_x_s_i16m2_i16 (vint16m2_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m4_i16(
@@ -655,15 +641,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i16 [[TMP0]]
 //
-int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-vint16m4_t test_vmv_s_x_i16m4(vint16m4_t dst, int16_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int16_t test_vmv_x_s_i16m4_i16 (vint16m4_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m8_i16(
@@ -671,15 +650,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i16 [[TMP0]]
 //
-int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-vint16m8_t test_vmv_s_x_i16m8(vint16m8_t dst, int16_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int16_t test_vmv_x_s_i16m8_i16 (vint16m8_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i32mf2_i32(
@@ -687,15 +659,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i32 [[TMP0]]
 //
-int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-vint32mf2_t test_vmv_s_x_i32mf2(vint32mf2_t dst, int32_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int32_t test_vmv_x_s_i32mf2_i32 (vint32mf2_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m1_i32(
@@ -703,15 +668,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i32 [[TMP0]]
 //
-int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-vint32m1_t test_vmv_s_x_i32m1(vint32m1_t dst, int32_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int32_t test_vmv_x_s_i32m1_i32 (vint32m1_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m2_i32(
@@ -719,15 +677,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i32 [[TMP0]]
 //
-int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-vint32m2_t test_vmv_s_x_i32m2(vint32m2_t dst, int32_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int32_t test_vmv_x_s_i32m2_i32 (vint32m2_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m4_i32(
@@ -735,15 +686,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i32 [[TMP0]]
 //
-int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-vint32m4_t test_vmv_s_x_i32m4(vint32m4_t dst, int32_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int32_t test_vmv_x_s_i32m4_i32 (vint32m4_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m8_i32(
@@ -751,15 +695,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i32 [[TMP0]]
 //
-int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-vint32m8_t test_vmv_s_x_i32m8(vint32m8_t dst, int32_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int32_t test_vmv_x_s_i32m8_i32 (vint32m8_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m1_i64(
@@ -767,15 +704,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-vint64m1_t test_vmv_s_x_i64m1(vint64m1_t dst, int64_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int64_t test_vmv_x_s_i64m1_i64 (vint64m1_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m2_i64(
@@ -783,15 +713,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-vint64m2_t test_vmv_s_x_i64m2(vint64m2_t dst, int64_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int64_t test_vmv_x_s_i64m2_i64 (vint64m2_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m4_i64(
@@ -799,15 +722,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-vint64m4_t test_vmv_s_x_i64m4(vint64m4_t dst, int64_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int64_t test_vmv_x_s_i64m4_i64 (vint64m4_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m8_i64(
@@ -815,15 +731,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
 //
-int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dst, int64_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+int64_t test_vmv_x_s_i64m8_i64 (vint64m8_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf8_u8(
@@ -831,15 +740,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dst, uint8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+uint8_t test_vmv_x_s_u8mf8_u8 (vuint8mf8_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf4_u8(
@@ -847,15 +749,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dst, uint8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+uint8_t test_vmv_x_s_u8mf4_u8 (vuint8mf4_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf2_u8(
@@ -863,15 +758,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dst, uint8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+uint8_t test_vmv_x_s_u8mf2_u8 (vuint8mf2_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m1_u8(
@@ -879,15 +767,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dst, uint8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
+uint8_t test_vmv_x_s_u8m1_u8 (vuint8m1_t src) {
+  return vmv_x(src);
 }

 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m2_u8(
@@ -895,15 +776,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]])
 // CHECK-RV64-NEXT:    ret i8 [[TMP0]]
 //
-uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { return vmv_x(src); }
-
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dst, uint8_t src, size_t vl) {
-  return vmv_s(dst, src, vl);
test_vmv_x_s_u8m2_u8 (vuint8m2_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8m4_u8( @@ -911,15 +785,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u8m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m4_t test_vmv_s_x_u8m4(vuint8m4_t dst, uint8_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint8_t test_vmv_x_s_u8m4_u8 (vuint8m4_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8m8_u8( @@ -927,15 +794,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u8m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m8_t test_vmv_s_x_u8m8(vuint8m8_t dst, uint8_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint8_t test_vmv_x_s_u8m8_u8 (vuint8m8_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf4_u16( @@ -943,15 +803,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vmv_s_x_u16mf4(vuint16mf4_t dst, uint16_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint16_t test_vmv_x_s_u16mf4_u16 (vuint16mf4_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf2_u16( @@ -959,15 +812,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vmv_s_x_u16mf2(vuint16mf2_t dst, uint16_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint16_t test_vmv_x_s_u16mf2_u16 (vuint16mf2_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m1_u16( @@ -975,15 +821,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vmv_s_x_u16m1(vuint16m1_t dst, uint16_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint16_t test_vmv_x_s_u16m1_u16 (vuint16m1_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m2_u16( @@ -991,15 +830,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 
@llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vmv_s_x_u16m2(vuint16m2_t dst, uint16_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint16_t test_vmv_x_s_u16m2_u16 (vuint16m2_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m4_u16( @@ -1007,15 +839,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vmv_s_x_u16m4(vuint16m4_t dst, uint16_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint16_t test_vmv_x_s_u16m4_u16 (vuint16m4_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m8_u16( @@ -1023,15 +848,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m8_t test_vmv_s_x_u16m8(vuint16m8_t dst, uint16_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint16_t test_vmv_x_s_u16m8_u16 (vuint16m8_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32mf2_u32( @@ -1039,15 +857,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vmv_s_x_u32mf2(vuint32mf2_t dst, uint32_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint32_t test_vmv_x_s_u32mf2_u32 (vuint32mf2_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m1_u32( @@ -1055,15 +866,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vmv_s_x_u32m1(vuint32m1_t dst, uint32_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint32_t test_vmv_x_s_u32m1_u32 (vuint32m1_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m2_u32( @@ -1071,15 +875,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) { return 
vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vmv_s_x_u32m2(vuint32m2_t dst, uint32_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint32_t test_vmv_x_s_u32m2_u32 (vuint32m2_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m4_u32( @@ -1087,15 +884,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vmv_s_x_u32m4(vuint32m4_t dst, uint32_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint32_t test_vmv_x_s_u32m4_u32 (vuint32m4_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m8_u32( @@ -1103,15 +893,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vmv_s_x_u32m8(vuint32m8_t dst, uint32_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint32_t test_vmv_x_s_u32m8_u32 (vuint32m8_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u64m1_u64( @@ -1119,15 +902,8 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) { return vmv_x(src); } - -// CHECK-RV64-LABEL: @test_vmv_s_x_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vmv_s_x_u64m1(vuint64m1_t dst, uint64_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint64_t test_vmv_x_s_u64m1_u64 (vuint64m1_t src) { + return vmv_x(src); } // CHECK-RV64-LABEL: @test_vmv_x_s_u64m2_u64( @@ -1135,135 +911,1878 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) { return vmv_x(src); } +uint64_t test_vmv_x_s_u64m2_u64 (vuint64m2_t src) { + return vmv_x(src); +} -// CHECK-RV64-LABEL: @test_vmv_s_x_u64m2( +// CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] // -vuint64m2_t test_vmv_s_x_u64m2(vuint64m2_t dst, uint64_t src, size_t vl) { - return vmv_s(dst, src, vl); +uint64_t test_vmv_x_s_u64m4_u64 (vuint64m4_t src) { + return vmv_x(src); } -// CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64( +// CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) { return vmv_x(src); } +uint64_t test_vmv_x_s_u64m8_u64 (vuint64m8_t src) { + return vmv_x(src); +} -// CHECK-RV64-LABEL: @test_vmv_s_x_u64m4( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmv_s_x_u64m4(vuint64m4_t dst, uint64_t src, size_t vl) { - return vmv_s(dst, src, vl); +vint8mf8_t test_vmv_v_v_i8mf8_tu (vint8mf8_t maskedoff, vint8mf8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) -// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) { return vmv_x(src); } +vint8mf8_t test_vmv_v_x_i8mf8_tu (vint8mf8_t maskedoff, int8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} -// CHECK-RV64-LABEL: @test_vmv_s_x_u64m8( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmv_s_x_u64m8(vuint64m8_t dst, uint64_t src, size_t vl) { - return vmv_s(dst, src, vl); +vint8mf4_t test_vmv_v_v_i8mf4_tu (vint8mf4_t maskedoff, vint8mf4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_v_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, size_t vl) { - return vmv_v_tu(merge, src, vl); +vint8mf4_t test_vmv_v_x_i8mf4_tu (vint8mf4_t maskedoff, int8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( [[MERGE:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_v_x_i32mf2_tu(vint32mf2_t merge, int32_t src, size_t vl) { - return vmv_v_tu(merge, src, vl); +vint8mf2_t test_vmv_v_v_i8mf2_tu (vint8mf2_t maskedoff, vint8mf2_t src, 
size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, size_t vl) { - return vmv_v_tu(merge, src, vl); +vint8mf2_t test_vmv_v_x_i8mf2_tu (vint8mf2_t maskedoff, int8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( [[MERGE:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_v_x_u32mf2_tu(vuint32mf2_t merge, uint32_t src, size_t vl) { - return vmv_v_tu(merge, src, vl); +vint8m1_t test_vmv_v_v_i8m1_tu (vint8m1_t maskedoff, vint8m1_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_v_v_i32mf2_ta(vint32mf2_t src, size_t vl) { - return vmv_v_ta(src, vl); +vint8m1_t test_vmv_v_x_i8m1_tu (vint8m1_t maskedoff, int8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_v_v_u32mf2_ta(vuint32mf2_t src, size_t vl) { - return vmv_v_ta(src, vl); +vint8m2_t test_vmv_v_v_i8m2_tu (vint8m2_t maskedoff, vint8m2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t src, size_t vl) { - return vmv_v_tu(merge, src, vl); +vint8m2_t test_vmv_v_x_i8m2_tu (vint8m2_t maskedoff, int8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_ta( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmv.v.v.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmv_v_v_f32mf2_ta(vfloat32mf2_t src, size_t vl) { - return vmv_v_ta(src, vl); +vint8m4_t test_vmv_v_v_i8m4_tu (vint8m4_t maskedoff, vint8m4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[MERGE:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_s_x_i32mf2_tu(vint32mf2_t merge, int32_t src, size_t vl) { - return vmv_s_tu(merge, src, vl); +vint8m4_t test_vmv_v_x_i8m4_tu (vint8m4_t maskedoff, int8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[MERGE:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmv_v_v_i8m8_tu (vint8m8_t maskedoff, vint8m8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmv_v_x_i8m8_tu (vint8m8_t maskedoff, int8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmv_v_v_i16mf4_tu (vint16mf4_t maskedoff, vint16mf4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmv_v_x_i16mf4_tu (vint16mf4_t maskedoff, int16_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmv_v_v_i16mf2_tu (vint16mf2_t maskedoff, vint16mf2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmv_v_x_i16mf2_tu (vint16mf2_t maskedoff, int16_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + 
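// ---------------------------------------------------------------------------
// [Editor's note -- not part of the patch] The tests in this file exercise the
// explicit tail-policy overloads covered by this change: vmv_v_tu carries an
// extra maskedoff operand whose elements are kept past vl (tail undisturbed),
// while vmv_v_ta omits it and lowers with a poison passthru operand in the IR
// (tail agnostic). A minimal usage sketch under those assumptions; the helper
// name demo_tail_policy is hypothetical and not taken from the patch:
//
//   #include <riscv_vector.h>
//
//   vint32m1_t demo_tail_policy(vint32m1_t maskedoff, vint32m1_t src,
//                               size_t vl) {
//     // Tail undisturbed: lanes at index >= vl come from maskedoff.
//     vint32m1_t tu = vmv_v_tu(maskedoff, src, vl);
//     // Tail agnostic: lanes at index >= vl are unspecified.
//     vint32m1_t ta = vmv_v_ta(src, vl);
//     return vmv_v_tu(tu, ta, vl);
//   }
// ---------------------------------------------------------------------------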
+// CHECK-RV64-LABEL: @test_vmv_v_v_i16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmv_v_v_i16m1_tu (vint16m1_t maskedoff, vint16m1_t src, size_t vl) {
+  return vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_x_i16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmv_v_x_i16m1_tu (vint16m1_t maskedoff, int16_t src, size_t vl) {
+  return vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_i16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmv_v_v_i16m2_tu (vint16m2_t maskedoff, vint16m2_t src, size_t vl) {
+  return vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_x_i16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmv_v_x_i16m2_tu (vint16m2_t maskedoff, int16_t src, size_t vl) {
+  return vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_i16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmv_v_v_i16m4_tu (vint16m4_t maskedoff, vint16m4_t src, size_t vl) {
+  return vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_x_i16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmv_v_x_i16m4_tu (vint16m4_t maskedoff, int16_t src, size_t vl) {
+  return vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_i16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmv_v_v_i16m8_tu (vint16m8_t maskedoff, vint16m8_t src, size_t vl) {
+  return vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_x_i16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmv_v_x_i16m8_tu (vint16m8_t maskedoff, int16_t src, size_t vl) {
+  return vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmv_v_v_i32mf2_tu (vint32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
+  return vmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t
test_vmv_v_x_i32mf2_tu (vint32mf2_t maskedoff, int32_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmv_v_v_i32m1_tu (vint32m1_t maskedoff, vint32m1_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmv_v_x_i32m1_tu (vint32m1_t maskedoff, int32_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmv_v_v_i32m2_tu (vint32m2_t maskedoff, vint32m2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmv_v_x_i32m2_tu (vint32m2_t maskedoff, int32_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmv_v_v_i32m4_tu (vint32m4_t maskedoff, vint32m4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmv_v_x_i32m4_tu (vint32m4_t maskedoff, int32_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmv_v_v_i32m8_tu (vint32m8_t maskedoff, vint32m8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmv_v_x_i32m8_tu (vint32m8_t maskedoff, int32_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmv_v_v_i64m1_tu (vint64m1_t maskedoff, vint64m1_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64( 
[[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmv_v_x_i64m1_tu (vint64m1_t maskedoff, int64_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmv_v_v_i64m2_tu (vint64m2_t maskedoff, vint64m2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmv_v_x_i64m2_tu (vint64m2_t maskedoff, int64_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmv_v_v_i64m4_tu (vint64m4_t maskedoff, vint64m4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmv_v_x_i64m4_tu (vint64m4_t maskedoff, int64_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmv_v_v_i64m8_tu (vint64m8_t maskedoff, vint64m8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmv_v_x_i64m8_tu (vint64m8_t maskedoff, int64_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmv_v_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmv_v_x_u8mf8_tu (vuint8mf8_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmv_v_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf4_tu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmv_v_x_u8mf4_tu (vuint8mf4_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmv_v_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmv_v_x_u8mf2_tu (vuint8mf2_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmv_v_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmv_v_x_u8m1_tu (vuint8m1_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmv_v_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmv_v_x_u8m2_tu (vuint8m2_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmv_v_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmv_v_x_u8m4_tu (vuint8m4_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmv_v_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t src, size_t vl) { + return 
vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmv_v_x_u8m8_tu (vuint8m8_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmv_v_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmv_v_x_u16mf4_tu (vuint16mf4_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmv_v_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmv_v_x_u16mf2_tu (vuint16mf2_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmv_v_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmv_v_x_u16m1_tu (vuint16m1_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmv_v_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmv_v_x_u16m2_tu (vuint16m2_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmv_v_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmv_v_x_u16m4_tu (vuint16m4_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmv_v_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmv_v_x_u16m8_tu (vuint16m8_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmv_v_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmv_v_x_u32mf2_tu (vuint32mf2_t maskedoff, uint32_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmv_v_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmv_v_x_u32m1_tu (vuint32m1_t maskedoff, uint32_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmv_v_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmv_v_x_u32m2_tu (vuint32m2_t maskedoff, uint32_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m4_tu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmv_v_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmv_v_x_u32m4_tu (vuint32m4_t maskedoff, uint32_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmv_v_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmv_v_x_u32m8_tu (vuint32m8_t maskedoff, uint32_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmv_v_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmv_v_x_u64m1_tu (vuint64m1_t maskedoff, uint64_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmv_v_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmv_v_x_u64m2_tu (vuint64m2_t maskedoff, uint64_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmv_v_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmv_v_x_u64m4_tu (vuint64m4_t 
maskedoff, uint64_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmv_v_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmv_v_x_u64m8_tu (vuint64m8_t maskedoff, uint64_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmv_v_v_i8mf8_ta (vint8mf8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmv_v_v_i8mf4_ta (vint8mf4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmv_v_v_i8mf2_ta (vint8mf2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmv_v_v_i8m1_ta (vint8m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmv_v_v_i8m2_ta (vint8m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmv_v_v_i8m4_ta (vint8m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmv_v_v_i8m8_ta (vint8m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmv_v_v_i16mf4_ta (vint16mf4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmv_v_v_i16mf2_ta (vint16mf2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmv_v_v_i16m1_ta (vint16m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmv_v_v_i16m2_ta (vint16m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmv_v_v_i16m4_ta (vint16m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmv_v_v_i16m8_ta (vint16m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmv_v_v_i32mf2_ta (vint32mf2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmv_v_v_i32m1_ta (vint32m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmv_v_v_i32m2_ta (vint32m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmv_v_v_i32m4_ta (vint32m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmv_v_v_i32m8_ta (vint32m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmv_v_v_i64m1_ta (vint64m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vint64m2_t test_vmv_v_v_i64m2_ta (vint64m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmv_v_v_i64m4_ta (vint64m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmv_v_v_i64m8_ta (vint64m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmv_v_v_u8mf8_ta (vuint8mf8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmv_v_v_u8mf4_ta (vuint8mf4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmv_v_v_u8mf2_ta (vuint8mf2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmv_v_v_u8m1_ta (vuint8m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmv_v_v_u8m2_ta (vuint8m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmv_v_v_u8m4_ta (vuint8m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmv_v_v_u8m8_ta (vuint8m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmv_v_v_u16mf4_ta (vuint16mf4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t 
test_vmv_v_v_u16mf2_ta (vuint16mf2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmv_v_v_u16m1_ta (vuint16m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmv_v_v_u16m2_ta (vuint16m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmv_v_v_u16m4_ta (vuint16m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmv_v_v_u16m8_ta (vuint16m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmv_v_v_u32mf2_ta (vuint32mf2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmv_v_v_u32m1_ta (vuint32m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmv_v_v_u32m2_ta (vuint32m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmv_v_v_u32m4_ta (vuint32m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmv_v_v_u32m8_ta (vuint32m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmv_v_v_u64m1_ta (vuint64m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t 
test_vmv_v_v_u64m2_ta (vuint64m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmv_v_v_u64m4_ta (vuint64m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmv_v_v_u64m8_ta (vuint64m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmv_v_v_f16mf4_tu (vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmv_v_v_f16mf2_tu (vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmv_v_v_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmv_v_v_f16m2_tu (vfloat16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmv_v_v_f16m4_tu (vfloat16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmv_v_v_f16m8_tu (vfloat16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vmv_v_v_f32mf2_tu (vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32m1_t test_vmv_v_v_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vmv_v_v_f32m2_tu (vfloat32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vmv_v_v_f32m4_tu (vfloat32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vmv_v_v_f32m8_tu (vfloat32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vmv_v_v_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vmv_v_v_f64m2_tu (vfloat64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vmv_v_v_f64m4_tu (vfloat64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vmv_v_v_f64m8_tu (vfloat64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return vmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmv_v_v_f16mf4_ta (vfloat16mf4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmv_v_v_f16mf2_ta (vfloat16mf2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f16.i64( 
poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmv_v_v_f16m1_ta (vfloat16m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmv_v_v_f16m2_ta (vfloat16m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmv_v_v_f16m4_ta (vfloat16m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmv_v_v_f16m8_ta (vfloat16m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vmv_v_v_f32mf2_ta (vfloat32mf2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vmv_v_v_f32m1_ta (vfloat32m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vmv_v_v_f32m2_ta (vfloat32m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vmv_v_v_f32m4_ta (vfloat32m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vmv_v_v_f32m8_ta (vfloat32m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vmv_v_v_f64m1_ta (vfloat64m1_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vmv_v_v_f64m2_ta (vfloat64m2_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmv.v.v.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vmv_v_v_f64m4_ta (vfloat64m4_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vmv_v_v_f64m8_ta (vfloat64m8_t src, size_t vl) { + return vmv_v_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmv_s_x_i8mf8_tu (vint8mf8_t maskedoff, int8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmv_s_x_i8mf4_tu (vint8mf4_t maskedoff, int8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmv_s_x_i8mf2_tu (vint8mf2_t maskedoff, int8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmv_s_x_i8m1_tu (vint8m1_t maskedoff, int8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmv_s_x_i8m2_tu (vint8m2_t maskedoff, int8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmv_s_x_i8m4_tu (vint8m4_t maskedoff, int8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmv_s_x_i8m8_tu (vint8m8_t maskedoff, int8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmv_s_x_i16mf4_tu (vint16mf4_t maskedoff, int16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmv.s.x.nxv2i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmv_s_x_i16mf2_tu (vint16mf2_t maskedoff, int16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmv_s_x_i16m1_tu (vint16m1_t maskedoff, int16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmv_s_x_i16m2_tu (vint16m2_t maskedoff, int16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmv_s_x_i16m4_tu (vint16m4_t maskedoff, int16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmv_s_x_i16m8_tu (vint16m8_t maskedoff, int16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmv_s_x_i32mf2_tu (vint32mf2_t maskedoff, int32_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmv_s_x_i32m1_tu (vint32m1_t maskedoff, int32_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmv_s_x_i32m2_tu (vint32m2_t maskedoff, int32_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmv_s_x_i32m4_tu (vint32m4_t maskedoff, int32_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmv_s_x_i32m8_tu (vint32m8_t maskedoff, int32_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// 
CHECK-RV64-LABEL: @test_vmv_s_x_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmv_s_x_i64m1_tu (vint64m1_t maskedoff, int64_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmv_s_x_i64m2_tu (vint64m2_t maskedoff, int64_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmv_s_x_i64m4_tu (vint64m4_t maskedoff, int64_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmv_s_x_i64m8_tu (vint64m8_t maskedoff, int64_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmv_s_x_u8mf8_tu (vuint8mf8_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmv_s_x_u8mf4_tu (vuint8mf4_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmv_s_x_u8mf2_tu (vuint8mf2_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmv_s_x_u8m1_tu (vuint8m1_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmv_s_x_u8m2_tu (vuint8m2_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmv_s_x_u8m4_tu (vuint8m4_t 
maskedoff, uint8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmv_s_x_u8m8_tu (vuint8m8_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmv_s_x_u16mf4_tu (vuint16mf4_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmv_s_x_u16mf2_tu (vuint16mf2_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmv_s_x_u16m1_tu (vuint16m1_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmv_s_x_u16m2_tu (vuint16m2_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmv_s_x_u16m4_tu (vuint16m4_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmv_s_x_u16m8_tu (vuint16m8_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmv_s_x_u32mf2_tu (vuint32mf2_t maskedoff, uint32_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmv_s_x_u32m1_tu (vuint32m1_t maskedoff, uint32_t src, size_t vl) { + return vmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( 
<vscale x 4 x i32> [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmv_s_x_u32m2_tu (vuint32m2_t maskedoff, uint32_t src, size_t vl) {
+  return vmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmv_s_x_u32m4_tu (vuint32m4_t maskedoff, uint32_t src, size_t vl) {
+  return vmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmv_s_x_u32m8_tu (vuint32m8_t maskedoff, uint32_t src, size_t vl) {
+  return vmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmv_s_x_u64m1_tu (vuint64m1_t maskedoff, uint64_t src, size_t vl) {
+  return vmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmv_s_x_u64m2_tu (vuint64m2_t maskedoff, uint64_t src, size_t vl) {
+  return vmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmv_s_x_u64m4_tu (vuint64m4_t maskedoff, uint64_t src, size_t vl) {
+  return vmv_s_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint32mf2_t test_vmv_s_x_u32mf2_tu(vuint32mf2_t merge, uint32_t src, size_t vl) {
-  return vmv_s_tu(merge, src, vl);
+vuint64m8_t test_vmv_s_x_u64m8_tu (vuint64m8_t maskedoff, uint64_t src, size_t vl) {
+  return vmv_s_tu(maskedoff, src, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmv.c
@@ -6,12 +6,66 @@
 #include <riscv_vector.h>
 
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.i64(<vscale x 1 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmv_v_f_f16mf4 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.i64(<vscale x 2 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmv_v_f_f16mf2 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.i64(<vscale x 4 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmv_v_f_f16m1 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.i64(<vscale x 8 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmv_v_f_f16m2 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.i64(<vscale x 16 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmv_v_f_f16m4 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.i64(<vscale x 32 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmv_v_f_f16m8 (_Float16 src, size_t vl) {
+  return vfmv_v_f_f16m8(src, vl);
+}
+
 // CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(<vscale x 1 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfmv_v_f_f32mf2(float src, size_t vl) {
+vfloat32mf2_t test_vfmv_v_f_f32mf2 (float src, size_t vl) {
   return vfmv_v_f_f32mf2(src, vl);
 }
 
@@ -20,7 +74,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.i64(<vscale x 2 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfmv_v_f_f32m1(float src, size_t vl) {
+vfloat32m1_t test_vfmv_v_f_f32m1 (float src, size_t vl) {
   return vfmv_v_f_f32m1(src, vl);
 }
 
@@ -29,7 +83,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.i64(<vscale x 4 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfmv_v_f_f32m2(float src, size_t vl) {
+vfloat32m2_t test_vfmv_v_f_f32m2 (float src, size_t vl) {
   return vfmv_v_f_f32m2(src, vl);
 }
 
@@ -38,7 +92,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.i64(<vscale x 8 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfmv_v_f_f32m4(float src, size_t vl) {
+vfloat32m4_t test_vfmv_v_f_f32m4 (float src, size_t vl) {
   return vfmv_v_f_f32m4(src, vl);
 }
 
@@ -47,7 +101,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.i64(<vscale x 16 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfmv_v_f_f32m8(float src, size_t vl) {
+vfloat32m8_t test_vfmv_v_f_f32m8 (float src, size_t vl) {
   return vfmv_v_f_f32m8(src, vl);
 }
 
@@ -56,7 +110,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.i64(<vscale x 1 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfmv_v_f_f64m1(double src, size_t vl) {
+vfloat64m1_t test_vfmv_v_f_f64m1 (double src, size_t vl) {
   return vfmv_v_f_f64m1(src, vl);
 }
 
@@ -65,7 +119,7 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.i64(<vscale x 2 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfmv_v_f_f64m2(double src, size_t vl) {
+vfloat64m2_t test_vfmv_v_f_f64m2 (double src, size_t vl) {
   return vfmv_v_f_f64m2(src, vl);
 }
 
@@ -74,7
+128,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f64.i64( poison, double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmv_v_f_f64m4(double src, size_t vl) { +vfloat64m4_t test_vfmv_v_f_f64m4 (double src, size_t vl) { return vfmv_v_f_f64m4(src, vl); } @@ -83,26 +137,134 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f64.i64( poison, double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmv_v_f_f64m8(double src, size_t vl) { +vfloat64m8_t test_vfmv_v_f_f64m8 (double src, size_t vl) { return vfmv_v_f_f64m8(src, vl); } +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf4_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv1f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16mf4_f16 (vfloat16mf4_t src) { + return vfmv_f_s_f16mf4_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmv_s_f_f16mf4 (_Float16 src, size_t vl) { + return vfmv_s_f_f16mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv2f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16mf2_f16 (vfloat16mf2_t src) { + return vfmv_f_s_f16mf2_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmv_s_f_f16mf2 (_Float16 src, size_t vl) { + return vfmv_s_f_f16mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv4f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m1_f16 (vfloat16m1_t src) { + return vfmv_f_s_f16m1_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmv_s_f_f16m1 (_Float16 src, size_t vl) { + return vfmv_s_f_f16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv8f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m2_f16 (vfloat16m2_t src) { + return vfmv_f_s_f16m2_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmv_s_f_f16m2 (_Float16 src, size_t vl) { + return vfmv_s_f_f16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv16f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m4_f16 (vfloat16m4_t src) { + return vfmv_f_s_f16m4_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmv_s_f_f16m4 (_Float16 src, size_t vl) { + return vfmv_s_f_f16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv32f16( [[SRC:%.*]]) +// CHECK-RV64-NEXT: ret half [[TMP0]] +// +_Float16 test_vfmv_f_s_f16m8_f16 (vfloat16m8_t src) { + return vfmv_f_s_f16m8_f16(src); +} + +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmv_s_f_f16m8 (_Float16 src, size_t vl) { + return vfmv_s_f_f16m8(src, vl); +} + // CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // -float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) { +float test_vfmv_f_s_f32mf2_f32 (vfloat32mf2_t src) { return vfmv_f_s_f32mf2_f32(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f32.i64( poison, float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dst, float src, size_t vl) { - return vfmv_s_f_f32mf2(dst, src, vl); +vfloat32mf2_t test_vfmv_s_f_f32mf2 (float src, size_t vl) { + return vfmv_s_f_f32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32( @@ -110,17 +272,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // -float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) { +float test_vfmv_f_s_f32m1_f32 (vfloat32m1_t src) { return vfmv_f_s_f32m1_f32(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f32.i64( poison, float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dst, float src, size_t vl) { - return vfmv_s_f_f32m1(dst, src, vl); +vfloat32m1_t test_vfmv_s_f_f32m1 (float src, size_t vl) { + return vfmv_s_f_f32m1(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32( @@ -128,17 +290,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // -float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) { +float test_vfmv_f_s_f32m2_f32 (vfloat32m2_t src) { return vfmv_f_s_f32m2_f32(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f32.i64( poison, float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dst, float src, size_t vl) { - return vfmv_s_f_f32m2(dst, src, vl); +vfloat32m2_t test_vfmv_s_f_f32m2 (float src, size_t 
vl) { + return vfmv_s_f_f32m2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32( @@ -146,17 +308,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // -float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) { +float test_vfmv_f_s_f32m4_f32 (vfloat32m4_t src) { return vfmv_f_s_f32m4_f32(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f32.i64( poison, float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dst, float src, size_t vl) { - return vfmv_s_f_f32m4(dst, src, vl); +vfloat32m4_t test_vfmv_s_f_f32m4 (float src, size_t vl) { + return vfmv_s_f_f32m4(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m8_f32( @@ -164,17 +326,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv16f32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret float [[TMP0]] // -float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) { +float test_vfmv_f_s_f32m8_f32 (vfloat32m8_t src) { return vfmv_f_s_f32m8_f32(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f32.i64( [[DST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f32.i64( poison, float [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dst, float src, size_t vl) { - return vfmv_s_f_f32m8(dst, src, vl); +vfloat32m8_t test_vfmv_s_f_f32m8 (float src, size_t vl) { + return vfmv_s_f_f32m8(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64( @@ -182,17 +344,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // -double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) { +double test_vfmv_f_s_f64m1_f64 (vfloat64m1_t src) { return vfmv_f_s_f64m1_f64(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f64.i64( poison, double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dst, double src, size_t vl) { - return vfmv_s_f_f64m1(dst, src, vl); +vfloat64m1_t test_vfmv_s_f_f64m1 (double src, size_t vl) { + return vfmv_s_f_f64m1(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64( @@ -200,17 +362,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // -double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) { +double test_vfmv_f_s_f64m2_f64 (vfloat64m2_t src) { return vfmv_f_s_f64m2_f64(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f64.i64( poison, double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dst, double src, size_t vl) { - return vfmv_s_f_f64m2(dst, src, vl); +vfloat64m2_t 
test_vfmv_s_f_f64m2 (double src, size_t vl) { + return vfmv_s_f_f64m2(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64( @@ -218,17 +380,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // -double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) { +double test_vfmv_f_s_f64m4_f64 (vfloat64m4_t src) { return vfmv_f_s_f64m4_f64(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f64.i64( poison, double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dst, double src, size_t vl) { - return vfmv_s_f_f64m4(dst, src, vl); +vfloat64m4_t test_vfmv_s_f_f64m4 (double src, size_t vl) { + return vfmv_s_f_f64m4(src, vl); } // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64( @@ -236,206 +398,476 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret double [[TMP0]] // -double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) { +double test_vfmv_f_s_f64m8_f64 (vfloat64m8_t src) { return vfmv_f_s_f64m8_f64(src); } // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f64.i64( [[DST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f64.i64( poison, double [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dst, double src, size_t vl) { - return vfmv_s_f_f64m8(dst, src, vl); +vfloat64m8_t test_vfmv_s_f_f64m8 (double src, size_t vl) { + return vfmv_s_f_f64m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f16.i64( [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfmv_v_f_f16mf4_tu (vfloat16mf4_t maskedoff, _Float16 src, size_t vl) { + return vfmv_v_f_f16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f16.i64( [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfmv_v_f_f16mf2_tu (vfloat16mf2_t maskedoff, _Float16 src, size_t vl) { + return vfmv_v_f_f16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f16.i64( [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfmv_v_f_f16m1_tu (vfloat16m1_t maskedoff, _Float16 src, size_t vl) { + return vfmv_v_f_f16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f16.i64( [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfmv_v_f_f16m2_tu (vfloat16m2_t maskedoff, _Float16 src, size_t vl) { + return vfmv_v_f_f16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16f16.i64( [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfmv_v_f_f16m4_tu (vfloat16m4_t maskedoff, _Float16 src, size_t vl) { + return vfmv_v_f_f16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv32f16.i64( [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vfmv_v_f_f16m8_tu (vfloat16m8_t maskedoff, _Float16 src, size_t vl) { + return vfmv_v_f_f16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f32.i64( [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmv_v_f_f32mf2_tu (vfloat32mf2_t maskedoff, float src, size_t vl) { + return vfmv_v_f_f32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f32.i64( [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmv_v_f_f32m1_tu (vfloat32m1_t maskedoff, float src, size_t vl) { + return vfmv_v_f_f32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f32.i64( [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmv_v_f_f32m2_tu (vfloat32m2_t maskedoff, float src, size_t vl) { + return vfmv_v_f_f32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f32.i64( [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmv_v_f_f32m4_tu (vfloat32m4_t maskedoff, float src, size_t vl) { + return vfmv_v_f_f32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16f32.i64( [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmv_v_f_f32m8_tu (vfloat32m8_t maskedoff, float src, size_t vl) { + return vfmv_v_f_f32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f64.i64( [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmv_v_f_f64m1_tu (vfloat64m1_t maskedoff, double src, size_t vl) { + return vfmv_v_f_f64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f64.i64( [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmv_v_f_f64m2_tu (vfloat64m2_t maskedoff, double src, size_t vl) { + return vfmv_v_f_f64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f64.i64( [[MASKEDOFF:%.*]], double [[SRC:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmv_v_f_f64m4_tu (vfloat64m4_t maskedoff, double src, size_t vl) { + return vfmv_v_f_f64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f64.i64( [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmv_v_f_f64m8_tu (vfloat64m8_t maskedoff, double src, size_t vl) { + return vfmv_v_f_f64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf4_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmv_v_f_f16mf4 (_Float16 src, size_t vl) { - return vfmv_v_f_f16mf4(src, vl); +vfloat16mf4_t test_vfmv_v_f_f16mf4_ta (_Float16 src, size_t vl) { + return vfmv_v_f_f16mf4_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16mf2_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmv_v_f_f16mf2 (_Float16 src, size_t vl) { - return vfmv_v_f_f16mf2(src, vl); +vfloat16mf2_t test_vfmv_v_f_f16mf2_ta (_Float16 src, size_t vl) { + return vfmv_v_f_f16mf2_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m1_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmv_v_f_f16m1 (_Float16 src, size_t vl) { - return vfmv_v_f_f16m1(src, vl); +vfloat16m1_t test_vfmv_v_f_f16m1_ta (_Float16 src, size_t vl) { + return vfmv_v_f_f16m1_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m2_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmv_v_f_f16m2 (_Float16 src, size_t vl) { - return vfmv_v_f_f16m2(src, vl); +vfloat16m2_t test_vfmv_v_f_f16m2_ta (_Float16 src, size_t vl) { + return vfmv_v_f_f16m2_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m4_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmv_v_f_f16m4 (_Float16 src, size_t vl) { - return vfmv_v_f_f16m4(src, vl); +vfloat16m4_t test_vfmv_v_f_f16m4_ta (_Float16 src, size_t vl) { + return vfmv_v_f_f16m4_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f16m8_ta( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv32f16.i64( poison, half [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmv_v_f_f16m8 (_Float16 src, size_t vl) { - return vfmv_v_f_f16m8(src, vl); +vfloat16m8_t test_vfmv_v_f_f16m8_ta (_Float16 src, size_t vl) { + return vfmv_v_f_f16m8_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf4_f16( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half 
@llvm.riscv.vfmv.f.s.nxv1f16( [[SRC:%.*]]) -// CHECK-RV64-NEXT: ret half [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f32.i64( poison, float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -_Float16 test_vfmv_f_s_f16mf4_f16 (vfloat16mf4_t src) { - return vfmv_f_s_f16mf4_f16(src); +vfloat32mf2_t test_vfmv_v_f_f32mf2_ta (float src, size_t vl) { + return vfmv_v_f_f32mf2_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f32.i64( poison, float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vfmv_s_f_f16mf4 (vfloat16mf4_t dest, _Float16 src, size_t vl) { - return vfmv_s_f_f16mf4(dest, src, vl); +vfloat32m1_t test_vfmv_v_f_f32m1_ta (float src, size_t vl) { + return vfmv_v_f_f32m1_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv2f16( [[SRC:%.*]]) -// CHECK-RV64-NEXT: ret half [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f32.i64( poison, float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -_Float16 test_vfmv_f_s_f16mf2_f16 (vfloat16mf2_t src) { - return vfmv_f_s_f16mf2_f16(src); +vfloat32m2_t test_vfmv_v_f_f32m2_ta (float src, size_t vl) { + return vfmv_v_f_f32m2_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f32.i64( poison, float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vfmv_s_f_f16mf2 (vfloat16mf2_t dest, _Float16 src, size_t vl) { - return vfmv_s_f_f16mf2(dest, src, vl); +vfloat32m4_t test_vfmv_v_f_f32m4_ta (float src, size_t vl) { + return vfmv_v_f_f32m4_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f32m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv4f16( [[SRC:%.*]]) -// CHECK-RV64-NEXT: ret half [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16f32.i64( poison, float [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -_Float16 test_vfmv_f_s_f16m1_f16 (vfloat16m1_t src) { - return vfmv_f_s_f16m1_f16(src); +vfloat32m8_t test_vfmv_v_f_f32m8_ta (float src, size_t vl) { + return vfmv_v_f_f32m8_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m1_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1f64.i64( poison, double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmv_s_f_f16m1 (vfloat16m1_t dest, _Float16 src, size_t vl) { - return vfmv_s_f_f16m1(dest, src, vl); +vfloat64m1_t test_vfmv_v_f_f64m1_ta (double src, size_t vl) 
{ + return vfmv_v_f_f64m1_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m2_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv8f16( [[SRC:%.*]]) -// CHECK-RV64-NEXT: ret half [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2f64.i64( poison, double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -_Float16 test_vfmv_f_s_f16m2_f16 (vfloat16m2_t src) { - return vfmv_f_s_f16m2_f16(src); +vfloat64m2_t test_vfmv_v_f_f64m2_ta (double src, size_t vl) { + return vfmv_v_f_f64m2_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m4_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4f64.i64( poison, double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmv_s_f_f16m2 (vfloat16m2_t dest, _Float16 src, size_t vl) { - return vfmv_s_f_f16m2(dest, src, vl); +vfloat64m4_t test_vfmv_v_f_f64m4_ta (double src, size_t vl) { + return vfmv_v_f_f64m4_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16( +// CHECK-RV64-LABEL: @test_vfmv_v_f_f64m8_ta( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv16f16( [[SRC:%.*]]) -// CHECK-RV64-NEXT: ret half [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8f64.i64( poison, double [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -_Float16 test_vfmv_f_s_f16m4_f16 (vfloat16m4_t src) { - return vfmv_f_s_f16m4_f16(src); +vfloat64m8_t test_vfmv_v_f_f64m8_ta (double src, size_t vl) { + return vfmv_v_f_f64m8_ta(src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4( +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1f16.i64( [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmv_s_f_f16m4 (vfloat16m4_t dest, _Float16 src, size_t vl) { - return vfmv_s_f_f16m4(dest, src, vl); +vfloat16mf4_t test_vfmv_s_f_f16mf4_tu (vfloat16mf4_t maskedoff, _Float16 src, size_t vl) { + return vfmv_s_f_f16mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16( +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv32f16( [[SRC:%.*]]) -// CHECK-RV64-NEXT: ret half [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2f16.i64( [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -_Float16 test_vfmv_f_s_f16m8_f16 (vfloat16m8_t src) { - return vfmv_f_s_f16m8_f16(src); +vfloat16mf2_t test_vfmv_s_f_f16mf2_tu (vfloat16mf2_t maskedoff, _Float16 src, size_t vl) { + return vfmv_s_f_f16mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8( +// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32f16.i64( [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
//
-vfloat16m8_t test_vfmv_s_f_f16m8 (vfloat16m8_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16m8(dest, src, vl);
+vfloat16m1_t test_vfmv_s_f_f16m1_tu (vfloat16m1_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_s_f_f16m1_tu(maskedoff, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2_tu(
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2_tu(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
//
-vfloat32mf2_t test_vfmv_v_f_f32mf2_tu (vfloat32mf2_t merge, float src, size_t vl) {
-  return vfmv_v_f_f32mf2_tu(merge, src, vl);
+vfloat16m2_t test_vfmv_s_f_f16m2_tu (vfloat16m2_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_s_f_f16m2_tu(maskedoff, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfmv_v_f_f32mf2_ta(
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4_tu(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.i64(<vscale x 1 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
//
-vfloat32mf2_t test_vfmv_v_f_f32mf2_ta (float src, size_t vl) {
-  return vfmv_v_f_f32mf2_ta(src, vl);
+vfloat16m4_t test_vfmv_s_f_f16m4_tu (vfloat16m4_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_s_f_f16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmv_s_f_f16m8_tu (vfloat16m8_t maskedoff, _Float16 src, size_t vl) {
+  return vfmv_s_f_f16m8_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2_tu(
// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
-vfloat32mf2_t test_vfmv_s_f_f32mf2_tu (vfloat32mf2_t merge, float src, size_t vl) {
-  return vfmv_s_f_f32mf2_tu(merge, src, vl);
+vfloat32mf2_t test_vfmv_s_f_f32mf2_tu (vfloat32mf2_t maskedoff, float src, size_t vl) {
+  return vfmv_s_f_f32mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmv_s_f_f32m1_tu (vfloat32m1_t maskedoff, float src, size_t vl) {
+  return vfmv_s_f_f32m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmv_s_f_f32m2_tu (vfloat32m2_t maskedoff, float src, size_t vl) {
+  return vfmv_s_f_f32m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmv_s_f_f32m4_tu (vfloat32m4_t maskedoff, float src, size_t vl) {
+  return vfmv_s_f_f32m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmv_s_f_f32m8_tu (vfloat32m8_t maskedoff, float src, size_t vl) {
+  return vfmv_s_f_f32m8_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmv_s_f_f64m1_tu (vfloat64m1_t maskedoff, double src, size_t vl) {
+  return vfmv_s_f_f64m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmv_s_f_f64m2_tu (vfloat64m2_t maskedoff, double src, size_t vl) {
+  return vfmv_s_f_f64m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmv_s_f_f64m4_tu (vfloat64m4_t maskedoff, double src, size_t vl) {
+  return vfmv_s_f_f64m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmv_s_f_f64m8_tu (vfloat64m8_t maskedoff, double src, size_t vl) {
+  return vfmv_s_f_f64m8_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16.i64(<vscale x 1 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vfmv_s_f_f16mf4_ta (_Float16 src, size_t vl) {
+  return vfmv_s_f_f16mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16.i64(<vscale x 2 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vfmv_s_f_f16mf2_ta (_Float16 src, size_t vl) {
+  return vfmv_s_f_f16mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16.i64(<vscale x 4 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vfmv_s_f_f16m1_ta (_Float16 src, size_t vl) {
+  return vfmv_s_f_f16m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16.i64(<vscale x 8 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vfmv_s_f_f16m2_ta (_Float16 src, size_t vl) {
+  return vfmv_s_f_f16m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16.i64(<vscale x 16 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vfmv_s_f_f16m4_ta (_Float16 src, size_t vl) {
+  return vfmv_s_f_f16m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16.i64(<vscale x 32 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vfmv_s_f_f16m8_ta (_Float16 src, size_t vl) {
+  return vfmv_s_f_f16m8_ta(src, vl);
}

// CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2_ta(
@@ -446,3 +878,75 @@
vfloat32mf2_t test_vfmv_s_f_f32mf2_ta (float src, size_t vl) {
  return vfmv_s_f_f32mf2_ta(src, vl);
}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32.i64(<vscale x 2 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfmv_s_f_f32m1_ta (float src, size_t vl) {
+  return vfmv_s_f_f32m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32.i64(<vscale x 4 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfmv_s_f_f32m2_ta (float src, size_t vl) {
+  return vfmv_s_f_f32m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfmv_s_f_f32m4_ta (float src, size_t vl) {
+  return vfmv_s_f_f32m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32.i64(<vscale x 16 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfmv_s_f_f32m8_ta (float src, size_t vl) {
+  return vfmv_s_f_f32m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64.i64(<vscale x 1 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfmv_s_f_f64m1_ta (double src, size_t vl) {
+  return vfmv_s_f_f64m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64.i64(<vscale x 2 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vfmv_s_f_f64m2_ta (double src, size_t vl) {
+  return vfmv_s_f_f64m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64.i64(<vscale x 4 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vfmv_s_f_f64m4_ta (double src, size_t vl) {
+  return vfmv_s_f_f64m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64.i64(<vscale x 8 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfmv_s_f_f64m8_ta (double src, size_t vl) {
+  return vfmv_s_f_f64m8_ta(src, vl);
+}
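The tests above and below pin down the reworked prototypes: the unmasked vmv_s/vfmv_s forms no longer take a destination operand (their passthru is poison), and the new _tu variants instead take an explicit maskedoff vector whose tail is preserved. A minimal caller-side sketch of the distinction, assuming a toolchain that ships these intrinsics (the helper names are illustrative, not from the patch):

    #include <riscv_vector.h>

    /* Illustrative helpers, not part of the patch. */
    static vfloat32m1_t set_first_tu(vfloat32m1_t maskedoff, float x, size_t vl) {
      /* Tail-undisturbed: element 0 becomes x; the remaining elements
         are taken from maskedoff. */
      return vfmv_s_f_f32m1_tu(maskedoff, x, vl);
    }

    static vfloat32m1_t set_first_ta(float x, size_t vl) {
      /* Tail-agnostic: no destination operand; elements past element 0
         are unspecified (poison passthru in the generated IR). */
      return vfmv_s_f_f32m1_ta(x, vl);
    }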
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmv.c
@@ -11,7 +11,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
-vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
+vint8mf8_t test_vmv_v_v_i8mf8 (vint8mf8_t src, size_t vl) {
  return vmv_v_v_i8mf8(src, vl);
}

@@ -20,7 +20,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i64(<vscale x 1 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
-vint8mf8_t test_vmv_v_x_i8mf8(int8_t src, size_t vl) {
+vint8mf8_t test_vmv_v_x_i8mf8 (int8_t src, size_t vl) {
  return vmv_v_x_i8mf8(src, vl);
}

@@ -29,7 +29,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
-vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
+vint8mf4_t test_vmv_v_v_i8mf4 (vint8mf4_t src, size_t vl) {
  return vmv_v_v_i8mf4(src, vl);
}

@@ -38,7 +38,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i64(<vscale x 2 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
-vint8mf4_t test_vmv_v_x_i8mf4(int8_t src, size_t vl) {
+vint8mf4_t test_vmv_v_x_i8mf4 (int8_t src, size_t vl) {
  return vmv_v_x_i8mf4(src, vl);
}

@@ -47,7 +47,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
-vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
+vint8mf2_t test_vmv_v_v_i8mf2 (vint8mf2_t src, size_t vl) {
  return vmv_v_v_i8mf2(src, vl);
}

@@ -56,7 +56,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i64(<vscale x 4 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
-vint8mf2_t test_vmv_v_x_i8mf2(int8_t src, size_t vl) {
+vint8mf2_t test_vmv_v_x_i8mf2 (int8_t src, size_t vl) {
  return vmv_v_x_i8mf2(src, vl);
}

@@ -65,7 +65,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
-vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) {
+vint8m1_t test_vmv_v_v_i8m1 (vint8m1_t src, size_t vl) {
  return vmv_v_v_i8m1(src, vl);
}

@@ -74,7 +74,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(<vscale x 8 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
-vint8m1_t test_vmv_v_x_i8m1(int8_t src, size_t vl) {
+vint8m1_t test_vmv_v_x_i8m1 (int8_t src, size_t vl) {
  return vmv_v_x_i8m1(src, vl);
}

@@ -83,7 +83,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
-vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) {
+vint8m2_t test_vmv_v_v_i8m2 (vint8m2_t src, size_t vl) {
  return vmv_v_v_i8m2(src, vl);
}

@@ -92,7 +92,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i64(<vscale x 16 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
-vint8m2_t test_vmv_v_x_i8m2(int8_t src, size_t vl) {
+vint8m2_t test_vmv_v_x_i8m2 (int8_t src, size_t vl) {
  return vmv_v_x_i8m2(src, vl);
}

@@ -101,7 +101,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
-vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) {
+vint8m4_t test_vmv_v_v_i8m4 (vint8m4_t src, size_t vl) {
  return vmv_v_v_i8m4(src, vl);
}

@@ -110,7 +110,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i64(<vscale x 32 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
-vint8m4_t test_vmv_v_x_i8m4(int8_t src, size_t vl) {
+vint8m4_t test_vmv_v_x_i8m4 (int8_t src, size_t vl) {
  return vmv_v_x_i8m4(src, vl);
}

@@ -119,7 +119,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
-vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) {
+vint8m8_t test_vmv_v_v_i8m8 (vint8m8_t src, size_t vl) {
  return vmv_v_v_i8m8(src, vl);
}

@@ -128,7 +128,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i64(<vscale x 64 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
-vint8m8_t test_vmv_v_x_i8m8(int8_t src, size_t vl) {
+vint8m8_t test_vmv_v_x_i8m8 (int8_t src, size_t vl) {
  return vmv_v_x_i8m8(src, vl);
}

@@ -137,7 +137,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
+vint16mf4_t test_vmv_v_v_i16mf4 (vint16mf4_t src, size_t vl) {
  return vmv_v_v_i16mf4(src, vl);
}

@@ -146,7 +146,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i64(<vscale x 1 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vmv_v_x_i16mf4(int16_t src, size_t vl) {
+vint16mf4_t test_vmv_v_x_i16mf4 (int16_t src, size_t vl) {
  return vmv_v_x_i16mf4(src, vl);
}

@@ -155,7 +155,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
+vint16mf2_t test_vmv_v_v_i16mf2 (vint16mf2_t src, size_t vl) {
  return vmv_v_v_i16mf2(src, vl);
}

@@ -164,7 +164,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i64(<vscale x 2 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vmv_v_x_i16mf2(int16_t src, size_t vl) {
+vint16mf2_t test_vmv_v_x_i16mf2 (int16_t src, size_t vl) {
  return vmv_v_x_i16mf2(src, vl);
}

@@ -173,7 +173,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
+vint16m1_t test_vmv_v_v_i16m1 (vint16m1_t src, size_t vl) {
  return vmv_v_v_i16m1(src, vl);
}

@@ -182,7 +182,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i64(<vscale x 4 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vmv_v_x_i16m1(int16_t src, size_t vl) {
+vint16m1_t test_vmv_v_x_i16m1 (int16_t src, size_t vl) {
  return vmv_v_x_i16m1(src, vl);
}

@@ -191,7 +191,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
+vint16m2_t test_vmv_v_v_i16m2 (vint16m2_t src, size_t vl) {
  return vmv_v_v_i16m2(src, vl);
}

@@ -200,7 +200,7 @@
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i64(<vscale x 8 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vmv_v_x_i16m2(int16_t src, size_t vl) { +vint16m2_t test_vmv_v_x_i16m2 (int16_t src, size_t vl) { return vmv_v_x_i16m2(src, vl); } @@ -209,7 +209,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) { +vint16m4_t test_vmv_v_v_i16m4 (vint16m4_t src, size_t vl) { return vmv_v_v_i16m4(src, vl); } @@ -218,7 +218,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmv_v_x_i16m4(int16_t src, size_t vl) { +vint16m4_t test_vmv_v_x_i16m4 (int16_t src, size_t vl) { return vmv_v_x_i16m4(src, vl); } @@ -227,7 +227,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) { +vint16m8_t test_vmv_v_v_i16m8 (vint16m8_t src, size_t vl) { return vmv_v_v_i16m8(src, vl); } @@ -236,7 +236,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmv_v_x_i16m8(int16_t src, size_t vl) { +vint16m8_t test_vmv_v_x_i16m8 (int16_t src, size_t vl) { return vmv_v_x_i16m8(src, vl); } @@ -245,7 +245,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) { +vint32mf2_t test_vmv_v_v_i32mf2 (vint32mf2_t src, size_t vl) { return vmv_v_v_i32mf2(src, vl); } @@ -254,7 +254,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_v_x_i32mf2(int32_t src, size_t vl) { +vint32mf2_t test_vmv_v_x_i32mf2 (int32_t src, size_t vl) { return vmv_v_x_i32mf2(src, vl); } @@ -263,7 +263,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) { +vint32m1_t test_vmv_v_v_i32m1 (vint32m1_t src, size_t vl) { return vmv_v_v_i32m1(src, vl); } @@ -272,7 +272,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmv_v_x_i32m1(int32_t src, size_t vl) { +vint32m1_t test_vmv_v_x_i32m1 (int32_t src, size_t vl) { return vmv_v_x_i32m1(src, vl); } @@ -281,7 +281,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) { +vint32m2_t test_vmv_v_v_i32m2 (vint32m2_t src, size_t vl) { return vmv_v_v_i32m2(src, vl); } @@ -290,7 +290,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmv_v_x_i32m2(int32_t src, size_t vl) { +vint32m2_t test_vmv_v_x_i32m2 (int32_t src, size_t vl) { return vmv_v_x_i32m2(src, vl); } @@ -299,7 +299,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t 
test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) { +vint32m4_t test_vmv_v_v_i32m4 (vint32m4_t src, size_t vl) { return vmv_v_v_i32m4(src, vl); } @@ -308,7 +308,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmv_v_x_i32m4(int32_t src, size_t vl) { +vint32m4_t test_vmv_v_x_i32m4 (int32_t src, size_t vl) { return vmv_v_x_i32m4(src, vl); } @@ -317,7 +317,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) { +vint32m8_t test_vmv_v_v_i32m8 (vint32m8_t src, size_t vl) { return vmv_v_v_i32m8(src, vl); } @@ -326,7 +326,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmv_v_x_i32m8(int32_t src, size_t vl) { +vint32m8_t test_vmv_v_x_i32m8 (int32_t src, size_t vl) { return vmv_v_x_i32m8(src, vl); } @@ -335,7 +335,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) { +vint64m1_t test_vmv_v_v_i64m1 (vint64m1_t src, size_t vl) { return vmv_v_v_i64m1(src, vl); } @@ -344,7 +344,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmv_v_x_i64m1(int64_t src, size_t vl) { +vint64m1_t test_vmv_v_x_i64m1 (int64_t src, size_t vl) { return vmv_v_x_i64m1(src, vl); } @@ -353,7 +353,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) { +vint64m2_t test_vmv_v_v_i64m2 (vint64m2_t src, size_t vl) { return vmv_v_v_i64m2(src, vl); } @@ -362,7 +362,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmv_v_x_i64m2(int64_t src, size_t vl) { +vint64m2_t test_vmv_v_x_i64m2 (int64_t src, size_t vl) { return vmv_v_x_i64m2(src, vl); } @@ -371,7 +371,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) { +vint64m4_t test_vmv_v_v_i64m4 (vint64m4_t src, size_t vl) { return vmv_v_v_i64m4(src, vl); } @@ -380,7 +380,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmv_v_x_i64m4(int64_t src, size_t vl) { +vint64m4_t test_vmv_v_x_i64m4 (int64_t src, size_t vl) { return vmv_v_x_i64m4(src, vl); } @@ -389,7 +389,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) { +vint64m8_t test_vmv_v_v_i64m8 (vint64m8_t src, size_t vl) { return vmv_v_v_i64m8(src, vl); } @@ -398,7 +398,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmv_v_x_i64m8(int64_t src, size_t 
vl) { +vint64m8_t test_vmv_v_x_i64m8 (int64_t src, size_t vl) { return vmv_v_x_i64m8(src, vl); } @@ -407,7 +407,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) { +vuint8mf8_t test_vmv_v_v_u8mf8 (vuint8mf8_t src, size_t vl) { return vmv_v_v_u8mf8(src, vl); } @@ -416,7 +416,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmv_v_x_u8mf8(uint8_t src, size_t vl) { +vuint8mf8_t test_vmv_v_x_u8mf8 (uint8_t src, size_t vl) { return vmv_v_x_u8mf8(src, vl); } @@ -425,7 +425,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) { +vuint8mf4_t test_vmv_v_v_u8mf4 (vuint8mf4_t src, size_t vl) { return vmv_v_v_u8mf4(src, vl); } @@ -434,7 +434,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmv_v_x_u8mf4(uint8_t src, size_t vl) { +vuint8mf4_t test_vmv_v_x_u8mf4 (uint8_t src, size_t vl) { return vmv_v_x_u8mf4(src, vl); } @@ -443,7 +443,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) { +vuint8mf2_t test_vmv_v_v_u8mf2 (vuint8mf2_t src, size_t vl) { return vmv_v_v_u8mf2(src, vl); } @@ -452,7 +452,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmv_v_x_u8mf2(uint8_t src, size_t vl) { +vuint8mf2_t test_vmv_v_x_u8mf2 (uint8_t src, size_t vl) { return vmv_v_x_u8mf2(src, vl); } @@ -461,7 +461,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) { +vuint8m1_t test_vmv_v_v_u8m1 (vuint8m1_t src, size_t vl) { return vmv_v_v_u8m1(src, vl); } @@ -470,7 +470,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmv_v_x_u8m1(uint8_t src, size_t vl) { +vuint8m1_t test_vmv_v_x_u8m1 (uint8_t src, size_t vl) { return vmv_v_x_u8m1(src, vl); } @@ -479,7 +479,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) { +vuint8m2_t test_vmv_v_v_u8m2 (vuint8m2_t src, size_t vl) { return vmv_v_v_u8m2(src, vl); } @@ -488,7 +488,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmv_v_x_u8m2(uint8_t src, size_t vl) { +vuint8m2_t test_vmv_v_x_u8m2 (uint8_t src, size_t vl) { return vmv_v_x_u8m2(src, vl); } @@ -497,7 +497,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) { +vuint8m4_t test_vmv_v_v_u8m4 (vuint8m4_t src, 
size_t vl) { return vmv_v_v_u8m4(src, vl); } @@ -506,7 +506,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmv_v_x_u8m4(uint8_t src, size_t vl) { +vuint8m4_t test_vmv_v_x_u8m4 (uint8_t src, size_t vl) { return vmv_v_x_u8m4(src, vl); } @@ -515,7 +515,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) { +vuint8m8_t test_vmv_v_v_u8m8 (vuint8m8_t src, size_t vl) { return vmv_v_v_u8m8(src, vl); } @@ -524,7 +524,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmv_v_x_u8m8(uint8_t src, size_t vl) { +vuint8m8_t test_vmv_v_x_u8m8 (uint8_t src, size_t vl) { return vmv_v_x_u8m8(src, vl); } @@ -533,7 +533,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) { +vuint16mf4_t test_vmv_v_v_u16mf4 (vuint16mf4_t src, size_t vl) { return vmv_v_v_u16mf4(src, vl); } @@ -542,7 +542,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmv_v_x_u16mf4(uint16_t src, size_t vl) { +vuint16mf4_t test_vmv_v_x_u16mf4 (uint16_t src, size_t vl) { return vmv_v_x_u16mf4(src, vl); } @@ -551,7 +551,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) { +vuint16mf2_t test_vmv_v_v_u16mf2 (vuint16mf2_t src, size_t vl) { return vmv_v_v_u16mf2(src, vl); } @@ -560,7 +560,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmv_v_x_u16mf2(uint16_t src, size_t vl) { +vuint16mf2_t test_vmv_v_x_u16mf2 (uint16_t src, size_t vl) { return vmv_v_x_u16mf2(src, vl); } @@ -569,7 +569,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) { +vuint16m1_t test_vmv_v_v_u16m1 (vuint16m1_t src, size_t vl) { return vmv_v_v_u16m1(src, vl); } @@ -578,7 +578,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmv_v_x_u16m1(uint16_t src, size_t vl) { +vuint16m1_t test_vmv_v_x_u16m1 (uint16_t src, size_t vl) { return vmv_v_x_u16m1(src, vl); } @@ -587,7 +587,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) { +vuint16m2_t test_vmv_v_v_u16m2 (vuint16m2_t src, size_t vl) { return vmv_v_v_u16m2(src, vl); } @@ -596,7 +596,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmv_v_x_u16m2(uint16_t src, size_t vl) { +vuint16m2_t test_vmv_v_x_u16m2 (uint16_t src, 
size_t vl) { return vmv_v_x_u16m2(src, vl); } @@ -605,7 +605,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) { +vuint16m4_t test_vmv_v_v_u16m4 (vuint16m4_t src, size_t vl) { return vmv_v_v_u16m4(src, vl); } @@ -614,7 +614,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmv_v_x_u16m4(uint16_t src, size_t vl) { +vuint16m4_t test_vmv_v_x_u16m4 (uint16_t src, size_t vl) { return vmv_v_x_u16m4(src, vl); } @@ -623,7 +623,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) { +vuint16m8_t test_vmv_v_v_u16m8 (vuint16m8_t src, size_t vl) { return vmv_v_v_u16m8(src, vl); } @@ -632,7 +632,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmv_v_x_u16m8(uint16_t src, size_t vl) { +vuint16m8_t test_vmv_v_x_u16m8 (uint16_t src, size_t vl) { return vmv_v_x_u16m8(src, vl); } @@ -641,7 +641,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) { +vuint32mf2_t test_vmv_v_v_u32mf2 (vuint32mf2_t src, size_t vl) { return vmv_v_v_u32mf2(src, vl); } @@ -650,7 +650,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_v_x_u32mf2(uint32_t src, size_t vl) { +vuint32mf2_t test_vmv_v_x_u32mf2 (uint32_t src, size_t vl) { return vmv_v_x_u32mf2(src, vl); } @@ -659,7 +659,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) { +vuint32m1_t test_vmv_v_v_u32m1 (vuint32m1_t src, size_t vl) { return vmv_v_v_u32m1(src, vl); } @@ -668,7 +668,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmv_v_x_u32m1(uint32_t src, size_t vl) { +vuint32m1_t test_vmv_v_x_u32m1 (uint32_t src, size_t vl) { return vmv_v_x_u32m1(src, vl); } @@ -677,7 +677,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) { +vuint32m2_t test_vmv_v_v_u32m2 (vuint32m2_t src, size_t vl) { return vmv_v_v_u32m2(src, vl); } @@ -686,7 +686,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmv_v_x_u32m2(uint32_t src, size_t vl) { +vuint32m2_t test_vmv_v_x_u32m2 (uint32_t src, size_t vl) { return vmv_v_x_u32m2(src, vl); } @@ -695,7 +695,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) { +vuint32m4_t test_vmv_v_v_u32m4 
(vuint32m4_t src, size_t vl) { return vmv_v_v_u32m4(src, vl); } @@ -704,7 +704,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmv_v_x_u32m4(uint32_t src, size_t vl) { +vuint32m4_t test_vmv_v_x_u32m4 (uint32_t src, size_t vl) { return vmv_v_x_u32m4(src, vl); } @@ -713,7 +713,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) { +vuint32m8_t test_vmv_v_v_u32m8 (vuint32m8_t src, size_t vl) { return vmv_v_v_u32m8(src, vl); } @@ -722,7 +722,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmv_v_x_u32m8(uint32_t src, size_t vl) { +vuint32m8_t test_vmv_v_x_u32m8 (uint32_t src, size_t vl) { return vmv_v_x_u32m8(src, vl); } @@ -731,7 +731,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) { +vuint64m1_t test_vmv_v_v_u64m1 (vuint64m1_t src, size_t vl) { return vmv_v_v_u64m1(src, vl); } @@ -740,7 +740,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmv_v_x_u64m1(uint64_t src, size_t vl) { +vuint64m1_t test_vmv_v_x_u64m1 (uint64_t src, size_t vl) { return vmv_v_x_u64m1(src, vl); } @@ -749,7 +749,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) { +vuint64m2_t test_vmv_v_v_u64m2 (vuint64m2_t src, size_t vl) { return vmv_v_v_u64m2(src, vl); } @@ -758,7 +758,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmv_v_x_u64m2(uint64_t src, size_t vl) { +vuint64m2_t test_vmv_v_x_u64m2 (uint64_t src, size_t vl) { return vmv_v_x_u64m2(src, vl); } @@ -767,7 +767,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) { +vuint64m4_t test_vmv_v_v_u64m4 (vuint64m4_t src, size_t vl) { return vmv_v_v_u64m4(src, vl); } @@ -776,7 +776,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmv_v_x_u64m4(uint64_t src, size_t vl) { +vuint64m4_t test_vmv_v_x_u64m4 (uint64_t src, size_t vl) { return vmv_v_x_u64m4(src, vl); } @@ -785,7 +785,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) { +vuint64m8_t test_vmv_v_v_u64m8 (vuint64m8_t src, size_t vl) { return vmv_v_v_u64m8(src, vl); } @@ -794,16 +794,70 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmv_v_x_u64m8(uint64_t src, size_t vl) { +vuint64m8_t 
test_vmv_v_x_u64m8 (uint64_t src, size_t vl) { return vmv_v_x_u64m8(src, vl); } +// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmv_v_v_f16mf4 (vfloat16mf4_t src, size_t vl) { + return vmv_v_v_f16mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmv_v_v_f16mf2 (vfloat16mf2_t src, size_t vl) { + return vmv_v_v_f16mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmv_v_v_f16m1 (vfloat16m1_t src, size_t vl) { + return vmv_v_v_f16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmv_v_v_f16m2 (vfloat16m2_t src, size_t vl) { + return vmv_v_v_f16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmv_v_v_f16m4 (vfloat16m4_t src, size_t vl) { + return vmv_v_v_f16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmv_v_v_f16m8 (vfloat16m8_t src, size_t vl) { + return vmv_v_v_f16m8(src, vl); +} + // CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) { +vfloat32mf2_t test_vmv_v_v_f32mf2 (vfloat32mf2_t src, size_t vl) { return vmv_v_v_f32mf2(src, vl); } @@ -812,7 +866,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) { +vfloat32m1_t test_vmv_v_v_f32m1 (vfloat32m1_t src, size_t vl) { return vmv_v_v_f32m1(src, vl); } @@ -821,7 +875,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) { +vfloat32m2_t test_vmv_v_v_f32m2 (vfloat32m2_t src, size_t vl) { return vmv_v_v_f32m2(src, vl); } @@ -830,7 +884,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) { +vfloat32m4_t test_vmv_v_v_f32m4 (vfloat32m4_t src, size_t vl) { return vmv_v_v_f32m4(src, vl); } @@ -839,7 +893,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) { +vfloat32m8_t test_vmv_v_v_f32m8 (vfloat32m8_t src, size_t vl) { return vmv_v_v_f32m8(src, vl); } @@ -848,7 +902,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) { +vfloat64m1_t test_vmv_v_v_f64m1 (vfloat64m1_t src, size_t vl) { return vmv_v_v_f64m1(src, vl); } @@ -857,7 +911,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) { +vfloat64m2_t test_vmv_v_v_f64m2 (vfloat64m2_t src, size_t vl) { return vmv_v_v_f64m2(src, vl); } @@ -866,7 +920,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) { +vfloat64m4_t test_vmv_v_v_f64m4 (vfloat64m4_t src, size_t vl) { return vmv_v_v_f64m4(src, vl); } @@ -875,7 +929,7 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) { +vfloat64m8_t test_vmv_v_v_f64m8 (vfloat64m8_t src, size_t vl) { return vmv_v_v_f64m8(src, vl); } @@ -884,15 +938,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { return vmv_x_s_i8mf8_i8(src); } +int8_t test_vmv_x_s_i8mf8_i8 (vint8mf8_t src) { + return vmv_x_s_i8mf8_i8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dst, int8_t src, size_t vl) { - return vmv_s_x_i8mf8(dst, src, vl); +vint8mf8_t test_vmv_s_x_i8mf8 (int8_t src, size_t vl) { + return vmv_s_x_i8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf4_i8( @@ -900,15 +956,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) { return vmv_x_s_i8mf4_i8(src); } +int8_t test_vmv_x_s_i8mf4_i8 (vint8mf4_t src) { + return vmv_x_s_i8mf4_i8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf4_t test_vmv_s_x_i8mf4(vint8mf4_t dst, int8_t src, size_t vl) { - return vmv_s_x_i8mf4(dst, src, vl); +vint8mf4_t test_vmv_s_x_i8mf4 (int8_t src, size_t vl) { + return vmv_s_x_i8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf2_i8( @@ -916,15 +974,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) { return vmv_x_s_i8mf2_i8(src); } +int8_t test_vmv_x_s_i8mf2_i8 (vint8mf2_t src) { + return 
vmv_x_s_i8mf2_i8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8mf2_t test_vmv_s_x_i8mf2(vint8mf2_t dst, int8_t src, size_t vl) { - return vmv_s_x_i8mf2(dst, src, vl); +vint8mf2_t test_vmv_s_x_i8mf2 (int8_t src, size_t vl) { + return vmv_s_x_i8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8m1_i8( @@ -932,15 +992,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { return vmv_x_s_i8m1_i8(src); } +int8_t test_vmv_x_s_i8m1_i8 (vint8m1_t src) { + return vmv_x_s_i8m1_i8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_i8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dst, int8_t src, size_t vl) { - return vmv_s_x_i8m1(dst, src, vl); +vint8m1_t test_vmv_s_x_i8m1 (int8_t src, size_t vl) { + return vmv_s_x_i8m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8m2_i8( @@ -948,15 +1010,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { return vmv_x_s_i8m2_i8(src); } +int8_t test_vmv_x_s_i8m2_i8 (vint8m2_t src) { + return vmv_x_s_i8m2_i8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_i8m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dst, int8_t src, size_t vl) { - return vmv_s_x_i8m2(dst, src, vl); +vint8m2_t test_vmv_s_x_i8m2 (int8_t src, size_t vl) { + return vmv_s_x_i8m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8m4_i8( @@ -964,15 +1028,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { return vmv_x_s_i8m4_i8(src); } +int8_t test_vmv_x_s_i8m4_i8 (vint8m4_t src) { + return vmv_x_s_i8m4_i8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_i8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dst, int8_t src, size_t vl) { - return vmv_s_x_i8m4(dst, src, vl); +vint8m4_t test_vmv_s_x_i8m4 (int8_t src, size_t vl) { + return vmv_s_x_i8m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i8m8_i8( @@ -980,15 +1046,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { return vmv_x_s_i8m8_i8(src); } +int8_t test_vmv_x_s_i8m8_i8 (vint8m8_t src) { + return vmv_x_s_i8m8_i8(src); +} // 
CHECK-RV64-LABEL: @test_vmv_s_x_i8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vmv_s_x_i8m8(vint8m8_t dst, int8_t src, size_t vl) { - return vmv_s_x_i8m8(dst, src, vl); +vint8m8_t test_vmv_s_x_i8m8 (int8_t src, size_t vl) { + return vmv_s_x_i8m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf4_i16( @@ -996,17 +1064,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) { +int16_t test_vmv_x_s_i16mf4_i16 (vint16mf4_t src) { return vmv_x_s_i16mf4_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf4_t test_vmv_s_x_i16mf4(vint16mf4_t dst, int16_t src, size_t vl) { - return vmv_s_x_i16mf4(dst, src, vl); +vint16mf4_t test_vmv_s_x_i16mf4 (int16_t src, size_t vl) { + return vmv_s_x_i16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf2_i16( @@ -1014,17 +1082,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) { +int16_t test_vmv_x_s_i16mf2_i16 (vint16mf2_t src) { return vmv_x_s_i16mf2_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16mf2_t test_vmv_s_x_i16mf2(vint16mf2_t dst, int16_t src, size_t vl) { - return vmv_s_x_i16mf2(dst, src, vl); +vint16mf2_t test_vmv_s_x_i16mf2 (int16_t src, size_t vl) { + return vmv_s_x_i16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16m1_i16( @@ -1032,17 +1100,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) { +int16_t test_vmv_x_s_i16m1_i16 (vint16m1_t src) { return vmv_x_s_i16m1_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vmv_s_x_i16m1(vint16m1_t dst, int16_t src, size_t vl) { - return vmv_s_x_i16m1(dst, src, vl); +vint16m1_t test_vmv_s_x_i16m1 (int16_t src, size_t vl) { + return vmv_s_x_i16m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16m2_i16( @@ -1050,17 +1118,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) { +int16_t test_vmv_x_s_i16m2_i16 (vint16m2_t src) { return vmv_x_s_i16m2_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m2( // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vmv_s_x_i16m2(vint16m2_t dst, int16_t src, size_t vl) { - return vmv_s_x_i16m2(dst, src, vl); +vint16m2_t test_vmv_s_x_i16m2 (int16_t src, size_t vl) { + return vmv_s_x_i16m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16m4_i16( @@ -1068,17 +1136,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) { +int16_t test_vmv_x_s_i16m4_i16 (vint16m4_t src) { return vmv_x_s_i16m4_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vmv_s_x_i16m4(vint16m4_t dst, int16_t src, size_t vl) { - return vmv_s_x_i16m4(dst, src, vl); +vint16m4_t test_vmv_s_x_i16m4 (int16_t src, size_t vl) { + return vmv_s_x_i16m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i16m8_i16( @@ -1086,17 +1154,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) { +int16_t test_vmv_x_s_i16m8_i16 (vint16m8_t src) { return vmv_x_s_i16m8_i16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i16m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vmv_s_x_i16m8(vint16m8_t dst, int16_t src, size_t vl) { - return vmv_s_x_i16m8(dst, src, vl); +vint16m8_t test_vmv_s_x_i16m8 (int16_t src, size_t vl) { + return vmv_s_x_i16m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i32mf2_i32( @@ -1104,17 +1172,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) { +int32_t test_vmv_x_s_i32mf2_i32 (vint32mf2_t src) { return vmv_x_s_i32mf2_i32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_s_x_i32mf2(vint32mf2_t dst, int32_t src, size_t vl) { - return vmv_s_x_i32mf2(dst, src, vl); +vint32mf2_t test_vmv_s_x_i32mf2 (int32_t src, size_t vl) { + return vmv_s_x_i32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i32m1_i32( @@ -1122,17 +1190,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) { +int32_t test_vmv_x_s_i32m1_i32 (vint32m1_t src) { return vmv_x_s_i32m1_i32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m1( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vmv_s_x_i32m1(vint32m1_t dst, int32_t src, size_t vl) { - return vmv_s_x_i32m1(dst, src, vl); +vint32m1_t test_vmv_s_x_i32m1 (int32_t src, size_t vl) { + return vmv_s_x_i32m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i32m2_i32( @@ -1140,17 +1208,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) { +int32_t test_vmv_x_s_i32m2_i32 (vint32m2_t src) { return vmv_x_s_i32m2_i32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vmv_s_x_i32m2(vint32m2_t dst, int32_t src, size_t vl) { - return vmv_s_x_i32m2(dst, src, vl); +vint32m2_t test_vmv_s_x_i32m2 (int32_t src, size_t vl) { + return vmv_s_x_i32m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i32m4_i32( @@ -1158,17 +1226,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) { +int32_t test_vmv_x_s_i32m4_i32 (vint32m4_t src) { return vmv_x_s_i32m4_i32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vmv_s_x_i32m4(vint32m4_t dst, int32_t src, size_t vl) { - return vmv_s_x_i32m4(dst, src, vl); +vint32m4_t test_vmv_s_x_i32m4 (int32_t src, size_t vl) { + return vmv_s_x_i32m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i32m8_i32( @@ -1176,17 +1244,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) { +int32_t test_vmv_x_s_i32m8_i32 (vint32m8_t src) { return vmv_x_s_i32m8_i32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vmv_s_x_i32m8(vint32m8_t dst, int32_t src, size_t vl) { - return vmv_s_x_i32m8(dst, src, vl); +vint32m8_t test_vmv_s_x_i32m8 (int32_t src, size_t vl) { + return vmv_s_x_i32m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i64m1_i64( @@ -1194,17 +1262,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) { +int64_t test_vmv_x_s_i64m1_i64 (vint64m1_t src) { return vmv_x_s_i64m1_i64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmv.s.x.nxv1i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vmv_s_x_i64m1(vint64m1_t dst, int64_t src, size_t vl) { - return vmv_s_x_i64m1(dst, src, vl); +vint64m1_t test_vmv_s_x_i64m1 (int64_t src, size_t vl) { + return vmv_s_x_i64m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i64m2_i64( @@ -1212,17 +1280,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) { +int64_t test_vmv_x_s_i64m2_i64 (vint64m2_t src) { return vmv_x_s_i64m2_i64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vmv_s_x_i64m2(vint64m2_t dst, int64_t src, size_t vl) { - return vmv_s_x_i64m2(dst, src, vl); +vint64m2_t test_vmv_s_x_i64m2 (int64_t src, size_t vl) { + return vmv_s_x_i64m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i64m4_i64( @@ -1230,17 +1298,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) { +int64_t test_vmv_x_s_i64m4_i64 (vint64m4_t src) { return vmv_x_s_i64m4_i64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vmv_s_x_i64m4(vint64m4_t dst, int64_t src, size_t vl) { - return vmv_s_x_i64m4(dst, src, vl); +vint64m4_t test_vmv_s_x_i64m4 (int64_t src, size_t vl) { + return vmv_s_x_i64m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_i64m8_i64( @@ -1248,17 +1316,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) { +int64_t test_vmv_x_s_i64m8_i64 (vint64m8_t src) { return vmv_x_s_i64m8_i64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_i64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dst, int64_t src, size_t vl) { - return vmv_s_x_i64m8(dst, src, vl); +vint64m8_t test_vmv_s_x_i64m8 (int64_t src, size_t vl) { + return vmv_s_x_i64m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf8_u8( @@ -1266,15 +1334,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { return vmv_x_s_u8mf8_u8(src); } +uint8_t test_vmv_x_s_u8mf8_u8 (vuint8mf8_t src) { + return vmv_x_s_u8mf8_u8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmv.s.x.nxv1i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dst, uint8_t src, size_t vl) { - return vmv_s_x_u8mf8(dst, src, vl); +vuint8mf8_t test_vmv_s_x_u8mf8 (uint8_t src, size_t vl) { + return vmv_s_x_u8mf8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf4_u8( @@ -1282,15 +1352,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { return vmv_x_s_u8mf4_u8(src); } +uint8_t test_vmv_x_s_u8mf4_u8 (vuint8mf4_t src) { + return vmv_x_s_u8mf4_u8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dst, uint8_t src, size_t vl) { - return vmv_s_x_u8mf4(dst, src, vl); +vuint8mf4_t test_vmv_s_x_u8mf4 (uint8_t src, size_t vl) { + return vmv_s_x_u8mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf2_u8( @@ -1298,15 +1370,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { return vmv_x_s_u8mf2_u8(src); } +uint8_t test_vmv_x_s_u8mf2_u8 (vuint8mf2_t src) { + return vmv_x_s_u8mf2_u8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dst, uint8_t src, size_t vl) { - return vmv_s_x_u8mf2(dst, src, vl); +vuint8mf2_t test_vmv_s_x_u8mf2 (uint8_t src, size_t vl) { + return vmv_s_x_u8mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8m1_u8( @@ -1314,15 +1388,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { return vmv_x_s_u8m1_u8(src); } +uint8_t test_vmv_x_s_u8m1_u8 (vuint8m1_t src) { + return vmv_x_s_u8m1_u8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_u8m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dst, uint8_t src, size_t vl) { - return vmv_s_x_u8m1(dst, src, vl); +vuint8m1_t test_vmv_s_x_u8m1 (uint8_t src, size_t vl) { + return vmv_s_x_u8m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8m2_u8( @@ -1330,15 +1406,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { return vmv_x_s_u8m2_u8(src); } +uint8_t test_vmv_x_s_u8m2_u8 (vuint8m2_t src) { + return vmv_x_s_u8m2_u8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_u8m2( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dst, uint8_t src, size_t vl) { - return vmv_s_x_u8m2(dst, src, vl); +vuint8m2_t test_vmv_s_x_u8m2 (uint8_t src, size_t vl) { + return vmv_s_x_u8m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8m4_u8( @@ -1346,15 +1424,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) { return vmv_x_s_u8m4_u8(src); } +uint8_t test_vmv_x_s_u8m4_u8 (vuint8m4_t src) { + return vmv_x_s_u8m4_u8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_u8m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vmv_s_x_u8m4(vuint8m4_t dst, uint8_t src, size_t vl) { - return vmv_s_x_u8m4(dst, src, vl); +vuint8m4_t test_vmv_s_x_u8m4 (uint8_t src, size_t vl) { + return vmv_s_x_u8m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u8m8_u8( @@ -1362,15 +1442,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i8 [[TMP0]] // -uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) { return vmv_x_s_u8m8_u8(src); } +uint8_t test_vmv_x_s_u8m8_u8 (vuint8m8_t src) { + return vmv_x_s_u8m8_u8(src); +} // CHECK-RV64-LABEL: @test_vmv_s_x_u8m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vmv_s_x_u8m8(vuint8m8_t dst, uint8_t src, size_t vl) { - return vmv_s_x_u8m8(dst, src, vl); +vuint8m8_t test_vmv_s_x_u8m8 (uint8_t src, size_t vl) { + return vmv_s_x_u8m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf4_u16( @@ -1378,17 +1460,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) { +uint16_t test_vmv_x_s_u16mf4_u16 (vuint16mf4_t src) { return vmv_x_s_u16mf4_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf4_t test_vmv_s_x_u16mf4(vuint16mf4_t dst, uint16_t src, size_t vl) { - return vmv_s_x_u16mf4(dst, src, vl); +vuint16mf4_t test_vmv_s_x_u16mf4 (uint16_t src, size_t vl) { + return vmv_s_x_u16mf4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf2_u16( @@ -1396,17 +1478,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) { +uint16_t test_vmv_x_s_u16mf2_u16 (vuint16mf2_t src) { return vmv_x_s_u16mf2_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2( // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16mf2_t test_vmv_s_x_u16mf2(vuint16mf2_t dst, uint16_t src, size_t vl) { - return vmv_s_x_u16mf2(dst, src, vl); +vuint16mf2_t test_vmv_s_x_u16mf2 (uint16_t src, size_t vl) { + return vmv_s_x_u16mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m1_u16( @@ -1414,17 +1496,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) { +uint16_t test_vmv_x_s_u16m1_u16 (vuint16m1_t src) { return vmv_x_s_u16m1_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vmv_s_x_u16m1(vuint16m1_t dst, uint16_t src, size_t vl) { - return vmv_s_x_u16m1(dst, src, vl); +vuint16m1_t test_vmv_s_x_u16m1 (uint16_t src, size_t vl) { + return vmv_s_x_u16m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m2_u16( @@ -1432,17 +1514,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) { +uint16_t test_vmv_x_s_u16m2_u16 (vuint16m2_t src) { return vmv_x_s_u16m2_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vmv_s_x_u16m2(vuint16m2_t dst, uint16_t src, size_t vl) { - return vmv_s_x_u16m2(dst, src, vl); +vuint16m2_t test_vmv_s_x_u16m2 (uint16_t src, size_t vl) { + return vmv_s_x_u16m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m4_u16( @@ -1450,17 +1532,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) { +uint16_t test_vmv_x_s_u16m4_u16 (vuint16m4_t src) { return vmv_x_s_u16m4_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vmv_s_x_u16m4(vuint16m4_t dst, uint16_t src, size_t vl) { - return vmv_s_x_u16m4(dst, src, vl); +vuint16m4_t test_vmv_s_x_u16m4 (uint16_t src, size_t vl) { + return vmv_s_x_u16m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u16m8_u16( @@ -1468,17 +1550,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i16 [[TMP0]] // -uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) { +uint16_t test_vmv_x_s_u16m8_u16 (vuint16m8_t src) { return vmv_x_s_u16m8_u16(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u16m8( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vmv_s_x_u16m8(vuint16m8_t dst, uint16_t src, size_t vl) { - return vmv_s_x_u16m8(dst, src, vl); +vuint16m8_t test_vmv_s_x_u16m8 (uint16_t src, size_t vl) { + return vmv_s_x_u16m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32mf2_u32( @@ -1486,17 +1568,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) { +uint32_t test_vmv_x_s_u32mf2_u32 (vuint32mf2_t src) { return vmv_x_s_u32mf2_u32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_s_x_u32mf2(vuint32mf2_t dst, uint32_t src, size_t vl) { - return vmv_s_x_u32mf2(dst, src, vl); +vuint32mf2_t test_vmv_s_x_u32mf2 (uint32_t src, size_t vl) { + return vmv_s_x_u32mf2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m1_u32( @@ -1504,17 +1586,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) { +uint32_t test_vmv_x_s_u32m1_u32 (vuint32m1_t src) { return vmv_x_s_u32m1_u32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vmv_s_x_u32m1(vuint32m1_t dst, uint32_t src, size_t vl) { - return vmv_s_x_u32m1(dst, src, vl); +vuint32m1_t test_vmv_s_x_u32m1 (uint32_t src, size_t vl) { + return vmv_s_x_u32m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m2_u32( @@ -1522,17 +1604,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) { +uint32_t test_vmv_x_s_u32m2_u32 (vuint32m2_t src) { return vmv_x_s_u32m2_u32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vmv_s_x_u32m2(vuint32m2_t dst, uint32_t src, size_t vl) { - return vmv_s_x_u32m2(dst, src, vl); +vuint32m2_t test_vmv_s_x_u32m2 (uint32_t src, size_t vl) { + return vmv_s_x_u32m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m4_u32( @@ -1540,17 +1622,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) { +uint32_t test_vmv_x_s_u32m4_u32 (vuint32m4_t src) { return vmv_x_s_u32m4_u32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m4( // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vmv_s_x_u32m4(vuint32m4_t dst, uint32_t src, size_t vl) { - return vmv_s_x_u32m4(dst, src, vl); +vuint32m4_t test_vmv_s_x_u32m4 (uint32_t src, size_t vl) { + return vmv_s_x_u32m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u32m8_u32( @@ -1558,17 +1640,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i32 [[TMP0]] // -uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) { +uint32_t test_vmv_x_s_u32m8_u32 (vuint32m8_t src) { return vmv_x_s_u32m8_u32(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u32m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vmv_s_x_u32m8(vuint32m8_t dst, uint32_t src, size_t vl) { - return vmv_s_x_u32m8(dst, src, vl); +vuint32m8_t test_vmv_s_x_u32m8 (uint32_t src, size_t vl) { + return vmv_s_x_u32m8(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u64m1_u64( @@ -1576,17 +1658,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) { +uint64_t test_vmv_x_s_u64m1_u64 (vuint64m1_t src) { return vmv_x_s_u64m1_u64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m1( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vmv_s_x_u64m1(vuint64m1_t dst, uint64_t src, size_t vl) { - return vmv_s_x_u64m1(dst, src, vl); +vuint64m1_t test_vmv_s_x_u64m1 (uint64_t src, size_t vl) { + return vmv_s_x_u64m1(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u64m2_u64( @@ -1594,17 +1676,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) { +uint64_t test_vmv_x_s_u64m2_u64 (vuint64m2_t src) { return vmv_x_s_u64m2_u64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m2( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vmv_s_x_u64m2(vuint64m2_t dst, uint64_t src, size_t vl) { - return vmv_s_x_u64m2(dst, src, vl); +vuint64m2_t test_vmv_s_x_u64m2 (uint64_t src, size_t vl) { + return vmv_s_x_u64m2(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64( @@ -1612,17 +1694,17 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) { +uint64_t test_vmv_x_s_u64m4_u64 (vuint64m4_t src) { return vmv_x_s_u64m4_u64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m4( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vmv_s_x_u64m4(vuint64m4_t dst, uint64_t src, size_t vl) { - return vmv_s_x_u64m4(dst, src, vl); +vuint64m4_t test_vmv_s_x_u64m4 (uint64_t src, size_t vl) { + return vmv_s_x_u64m4(src, vl); } // CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64( @@ -1630,195 +1712,2661 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64( [[SRC:%.*]]) // CHECK-RV64-NEXT: ret i64 [[TMP0]] // -uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) { +uint64_t test_vmv_x_s_u64m8_u64 (vuint64m8_t src) { return vmv_x_s_u64m8_u64(src); } // CHECK-RV64-LABEL: @test_vmv_s_x_u64m8( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vmv_s_x_u64m8(vuint64m8_t dst, uint64_t src, size_t vl) { - return vmv_s_x_u64m8(dst, src, vl); +vuint64m8_t test_vmv_s_x_u64m8 (uint64_t src, size_t vl) { + return vmv_s_x_u64m8(src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf4_t test_vmv_v_v_f16mf4 (vfloat16mf4_t src, size_t vl) { - return vmv_v_v_f16mf4(src, vl); +vint8mf8_t test_vmv_v_v_i8mf8_tu (vint8mf8_t maskedoff, vint8mf8_t src, size_t vl) { + return vmv_v_v_i8mf8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16mf2_t test_vmv_v_v_f16mf2 (vfloat16mf2_t src, size_t vl) { - return vmv_v_v_f16mf2(src, vl); +vint8mf8_t test_vmv_v_x_i8mf8_tu (vint8mf8_t maskedoff, int8_t src, size_t vl) { + return vmv_v_x_i8mf8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vmv_v_v_f16m1 (vfloat16m1_t src, size_t vl) { - return vmv_v_v_f16m1(src, vl); +vint8mf4_t test_vmv_v_v_i8mf4_tu (vint8mf4_t maskedoff, vint8mf4_t src, size_t vl) { + return vmv_v_v_i8mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// 
CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vmv_v_v_f16m2 (vfloat16m2_t src, size_t vl) { - return vmv_v_v_f16m2(src, vl); +vint8mf4_t test_vmv_v_x_i8mf4_tu (vint8mf4_t maskedoff, int8_t src, size_t vl) { + return vmv_v_x_i8mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vmv_v_v_f16m4 (vfloat16m4_t src, size_t vl) { - return vmv_v_v_f16m4(src, vl); +vint8mf2_t test_vmv_v_v_i8mf2_tu (vint8mf2_t maskedoff, vint8mf2_t src, size_t vl) { + return vmv_v_v_i8mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vmv_v_v_f16m8 (vfloat16m8_t src, size_t vl) { - return vmv_v_v_f16m8(src, vl); +vint8mf2_t test_vmv_v_x_i8mf2_tu (vint8mf2_t maskedoff, int8_t src, size_t vl) { + return vmv_v_x_i8mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_v_v_i32mf2_tu (vint32mf2_t merge, vint32mf2_t src, size_t vl) { - return vmv_v_v_i32mf2_tu(merge, src, vl); +vint8m1_t test_vmv_v_v_i8m1_tu (vint8m1_t maskedoff, vint8m1_t src, size_t vl) { + return vmv_v_v_i8m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( [[MERGE:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_v_x_i32mf2_tu (vint32mf2_t merge, int32_t src, size_t vl) { - return vmv_v_x_i32mf2_tu(merge, src, vl); +vint8m1_t test_vmv_v_x_i8m1_tu (vint8m1_t maskedoff, int8_t src, size_t vl) { + return vmv_v_x_i8m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t 
test_vmv_v_v_u32mf2_tu (vuint32mf2_t merge, vuint32mf2_t src, size_t vl) { - return vmv_v_v_u32mf2_tu(merge, src, vl); +vint8m2_t test_vmv_v_v_i8m2_tu (vint8m2_t maskedoff, vint8m2_t src, size_t vl) { + return vmv_v_v_i8m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( [[MERGE:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_v_x_u32mf2_tu (vuint32mf2_t merge, uint32_t src, size_t vl) { - return vmv_v_x_u32mf2_tu(merge, src, vl); +vint8m2_t test_vmv_v_x_i8m2_tu (vint8m2_t maskedoff, int8_t src, size_t vl) { + return vmv_v_x_i8m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_v_v_i32mf2_ta (vint32mf2_t src, size_t vl) { - return vmv_v_v_i32mf2_ta(src, vl); +vint8m4_t test_vmv_v_v_i8m4_tu (vint8m4_t maskedoff, vint8m4_t src, size_t vl) { + return vmv_v_v_i8m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_v_x_i32mf2_ta (int32_t src, size_t vl) { - return vmv_v_x_i32mf2_ta(src, vl); +vint8m4_t test_vmv_v_x_i8m4_tu (vint8m4_t maskedoff, int8_t src, size_t vl) { + return vmv_v_x_i8m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_v_v_u32mf2_ta (vuint32mf2_t src, size_t vl) { - return vmv_v_v_u32mf2_ta(src, vl); +vint8m8_t test_vmv_v_v_i8m8_tu (vint8m8_t maskedoff, vint8m8_t src, size_t vl) { + return vmv_v_v_i8m8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m8_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_v_x_u32mf2_ta (uint32_t src, size_t vl) { - return vmv_v_x_u32mf2_ta(src, vl); +vint8m8_t test_vmv_v_x_i8m8_tu (vint8m8_t maskedoff, int8_t src, size_t vl) { + return 
vmv_v_x_i8m8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( [[MERGE:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmv_v_v_f32mf2_tu (vfloat32mf2_t merge, vfloat32mf2_t src, size_t vl) { - return vmv_v_v_f32mf2_tu(merge, src, vl); +vint16mf4_t test_vmv_v_v_i16mf4_tu (vint16mf4_t maskedoff, vint16mf4_t src, size_t vl) { + return vmv_v_v_i16mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_ta( +// CHECK-RV64-LABEL: @test_vmv_v_x_i16mf4_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32mf2_t test_vmv_v_v_f32mf2_ta (vfloat32mf2_t src, size_t vl) { - return vmv_v_v_f32mf2_ta(src, vl); +vint16mf4_t test_vmv_v_x_i16mf4_tu (vint16mf4_t maskedoff, int16_t src, size_t vl) { + return vmv_v_x_i16mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[MERGE:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_s_x_i32mf2_tu (vint32mf2_t merge, int32_t src, size_t vl) { - return vmv_s_x_i32mf2_tu(merge, src, vl); +vint16mf2_t test_vmv_v_v_i16mf2_tu (vint16mf2_t maskedoff, vint16mf2_t src, size_t vl) { + return vmv_v_v_i16mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2_tu( +// CHECK-RV64-LABEL: @test_vmv_v_x_i16mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[MERGE:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_s_x_u32mf2_tu (vuint32mf2_t merge, uint32_t src, size_t vl) { - return vmv_s_x_u32mf2_tu(merge, src, vl); +vint16mf2_t test_vmv_v_x_i16mf2_tu (vint16mf2_t maskedoff, int16_t src, size_t vl) { + return vmv_v_x_i16mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2_ta( +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m1_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmv_v_v_i16m1_tu (vint16m1_t maskedoff, vint16m1_t src, size_t vl) { + return vmv_v_v_i16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmv_v_x_i16m1_tu (vint16m1_t maskedoff, int16_t src, size_t vl) { + return vmv_v_x_i16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmv_v_v_i16m2_tu (vint16m2_t maskedoff, vint16m2_t src, size_t vl) { + return vmv_v_v_i16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmv_v_x_i16m2_tu (vint16m2_t maskedoff, int16_t src, size_t vl) { + return vmv_v_x_i16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmv_v_v_i16m4_tu (vint16m4_t maskedoff, vint16m4_t src, size_t vl) { + return vmv_v_v_i16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmv_v_x_i16m4_tu (vint16m4_t maskedoff, int16_t src, size_t vl) { + return vmv_v_x_i16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmv_v_v_i16m8_tu (vint16m8_t maskedoff, vint16m8_t src, size_t vl) { + return vmv_v_v_i16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmv_v_x_i16m8_tu (vint16m8_t maskedoff, int16_t src, size_t vl) { + return vmv_v_x_i16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32mf2_t test_vmv_s_x_i32mf2_ta (int32_t src, size_t vl) { - return vmv_s_x_i32mf2_ta(src, vl); +vint32mf2_t test_vmv_v_v_i32mf2_tu (vint32mf2_t maskedoff, vint32mf2_t src, size_t vl) { + return vmv_v_v_i32mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2_ta( +// CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2_tu( // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32mf2_t test_vmv_s_x_u32mf2_ta (uint32_t src, size_t vl) { - return vmv_s_x_u32mf2_ta(src, vl); +vint32mf2_t test_vmv_v_x_i32mf2_tu (vint32mf2_t maskedoff, int32_t src, size_t vl) { + return vmv_v_x_i32mf2_tu(maskedoff, src, vl); +} + +// 
CHECK-RV64-LABEL: @test_vmv_v_v_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmv_v_v_i32m1_tu (vint32m1_t maskedoff, vint32m1_t src, size_t vl) { + return vmv_v_v_i32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmv_v_x_i32m1_tu (vint32m1_t maskedoff, int32_t src, size_t vl) { + return vmv_v_x_i32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmv_v_v_i32m2_tu (vint32m2_t maskedoff, vint32m2_t src, size_t vl) { + return vmv_v_v_i32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmv_v_x_i32m2_tu (vint32m2_t maskedoff, int32_t src, size_t vl) { + return vmv_v_x_i32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmv_v_v_i32m4_tu (vint32m4_t maskedoff, vint32m4_t src, size_t vl) { + return vmv_v_v_i32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmv_v_x_i32m4_tu (vint32m4_t maskedoff, int32_t src, size_t vl) { + return vmv_v_x_i32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmv_v_v_i32m8_tu (vint32m8_t maskedoff, vint32m8_t src, size_t vl) { + return vmv_v_v_i32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmv_v_x_i32m8_tu (vint32m8_t maskedoff, int32_t src, size_t vl) { + return vmv_v_x_i32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmv_v_v_i64m1_tu (vint64m1_t maskedoff, vint64m1_t src, size_t vl) { + return vmv_v_v_i64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmv_v_x_i64m1_tu (vint64m1_t maskedoff, int64_t src, size_t vl) { + return vmv_v_x_i64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmv_v_v_i64m2_tu (vint64m2_t maskedoff, vint64m2_t src, size_t vl) { + return vmv_v_v_i64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmv_v_x_i64m2_tu (vint64m2_t maskedoff, int64_t src, size_t vl) { + return vmv_v_x_i64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmv_v_v_i64m4_tu (vint64m4_t maskedoff, vint64m4_t src, size_t vl) { + return vmv_v_v_i64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmv_v_x_i64m4_tu (vint64m4_t maskedoff, int64_t src, size_t vl) { + return vmv_v_x_i64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmv_v_v_i64m8_tu (vint64m8_t maskedoff, vint64m8_t src, size_t vl) { + return vmv_v_v_i64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmv_v_x_i64m8_tu (vint64m8_t maskedoff, int64_t src, size_t vl) { + return vmv_v_x_i64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmv_v_v_u8mf8_tu (vuint8mf8_t maskedoff, vuint8mf8_t src, size_t vl) { + return vmv_v_v_u8mf8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmv_v_x_u8mf8_tu (vuint8mf8_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_x_u8mf8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmv_v_v_u8mf4_tu (vuint8mf4_t maskedoff, vuint8mf4_t src, size_t vl) { + return vmv_v_v_u8mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmv_v_x_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmv_v_x_u8mf4_tu (vuint8mf4_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_x_u8mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmv_v_v_u8mf2_tu (vuint8mf2_t maskedoff, vuint8mf2_t src, size_t vl) { + return vmv_v_v_u8mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmv_v_x_u8mf2_tu (vuint8mf2_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_x_u8mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmv_v_v_u8m1_tu (vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) { + return vmv_v_v_u8m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmv_v_x_u8m1_tu (vuint8m1_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_x_u8m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmv_v_v_u8m2_tu (vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) { + return vmv_v_v_u8m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmv_v_x_u8m2_tu (vuint8m2_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_x_u8m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmv_v_v_u8m4_tu (vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) { + return vmv_v_v_u8m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmv_v_x_u8m4_tu (vuint8m4_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_x_u8m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t 
test_vmv_v_v_u8m8_tu (vuint8m8_t maskedoff, vuint8m8_t src, size_t vl) { + return vmv_v_v_u8m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmv_v_x_u8m8_tu (vuint8m8_t maskedoff, uint8_t src, size_t vl) { + return vmv_v_x_u8m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmv_v_v_u16mf4_tu (vuint16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { + return vmv_v_v_u16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmv_v_x_u16mf4_tu (vuint16mf4_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_x_u16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmv_v_v_u16mf2_tu (vuint16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { + return vmv_v_v_u16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmv_v_x_u16mf2_tu (vuint16mf2_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_x_u16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmv_v_v_u16m1_tu (vuint16m1_t maskedoff, vuint16m1_t src, size_t vl) { + return vmv_v_v_u16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmv_v_x_u16m1_tu (vuint16m1_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_x_u16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmv_v_v_u16m2_tu (vuint16m2_t maskedoff, vuint16m2_t src, size_t vl) { + return vmv_v_v_u16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmv_v_x_u16m2_tu (vuint16m2_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_x_u16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmv_v_v_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmv_v_v_u16m4_tu (vuint16m4_t maskedoff, vuint16m4_t src, size_t vl) { + return vmv_v_v_u16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmv_v_x_u16m4_tu (vuint16m4_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_x_u16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmv_v_v_u16m8_tu (vuint16m8_t maskedoff, vuint16m8_t src, size_t vl) { + return vmv_v_v_u16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmv_v_x_u16m8_tu (vuint16m8_t maskedoff, uint16_t src, size_t vl) { + return vmv_v_x_u16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmv_v_v_u32mf2_tu (vuint32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { + return vmv_v_v_u32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmv_v_x_u32mf2_tu (vuint32mf2_t maskedoff, uint32_t src, size_t vl) { + return vmv_v_x_u32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmv_v_v_u32m1_tu (vuint32m1_t maskedoff, vuint32m1_t src, size_t vl) { + return vmv_v_v_u32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmv_v_x_u32m1_tu (vuint32m1_t maskedoff, uint32_t src, size_t vl) { + return vmv_v_x_u32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmv_v_v_u32m2_tu (vuint32m2_t maskedoff, vuint32m2_t src, size_t vl) { + return vmv_v_v_u32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64( [[MASKEDOFF:%.*]], i32 
[[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmv_v_x_u32m2_tu (vuint32m2_t maskedoff, uint32_t src, size_t vl) { + return vmv_v_x_u32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmv_v_v_u32m4_tu (vuint32m4_t maskedoff, vuint32m4_t src, size_t vl) { + return vmv_v_v_u32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmv_v_x_u32m4_tu (vuint32m4_t maskedoff, uint32_t src, size_t vl) { + return vmv_v_x_u32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmv_v_v_u32m8_tu (vuint32m8_t maskedoff, vuint32m8_t src, size_t vl) { + return vmv_v_v_u32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmv_v_x_u32m8_tu (vuint32m8_t maskedoff, uint32_t src, size_t vl) { + return vmv_v_x_u32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmv_v_v_u64m1_tu (vuint64m1_t maskedoff, vuint64m1_t src, size_t vl) { + return vmv_v_v_u64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmv_v_x_u64m1_tu (vuint64m1_t maskedoff, uint64_t src, size_t vl) { + return vmv_v_x_u64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmv_v_v_u64m2_tu (vuint64m2_t maskedoff, vuint64m2_t src, size_t vl) { + return vmv_v_v_u64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmv_v_x_u64m2_tu (vuint64m2_t maskedoff, uint64_t src, size_t vl) { + return vmv_v_x_u64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmv_v_v_u64m4_tu (vuint64m4_t maskedoff, vuint64m4_t src, size_t vl) { + return 
vmv_v_v_u64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmv_v_x_u64m4_tu (vuint64m4_t maskedoff, uint64_t src, size_t vl) { + return vmv_v_x_u64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmv_v_v_u64m8_tu (vuint64m8_t maskedoff, vuint64m8_t src, size_t vl) { + return vmv_v_v_u64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmv_v_x_u64m8_tu (vuint64m8_t maskedoff, uint64_t src, size_t vl) { + return vmv_v_x_u64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmv_v_v_i8mf8_ta (vint8mf8_t src, size_t vl) { + return vmv_v_v_i8mf8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmv_v_x_i8mf8_ta (int8_t src, size_t vl) { + return vmv_v_x_i8mf8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmv_v_v_i8mf4_ta (vint8mf4_t src, size_t vl) { + return vmv_v_v_i8mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmv_v_x_i8mf4_ta (int8_t src, size_t vl) { + return vmv_v_x_i8mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmv_v_v_i8mf2_ta (vint8mf2_t src, size_t vl) { + return vmv_v_v_i8mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmv_v_x_i8mf2_ta (int8_t src, size_t vl) { + return vmv_v_x_i8mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmv_v_v_i8m1_ta (vint8m1_t src, size_t vl) { + return vmv_v_v_i8m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmv_v_x_i8m1_ta (int8_t src, size_t vl) { + return vmv_v_x_i8m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmv_v_v_i8m2_ta (vint8m2_t src, size_t vl) { + return vmv_v_v_i8m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmv_v_x_i8m2_ta (int8_t src, size_t vl) { + return vmv_v_x_i8m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmv_v_v_i8m4_ta (vint8m4_t src, size_t vl) { + return vmv_v_v_i8m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmv_v_x_i8m4_ta (int8_t src, size_t vl) { + return vmv_v_x_i8m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmv_v_v_i8m8_ta (vint8m8_t src, size_t vl) { + return vmv_v_v_i8m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmv_v_x_i8m8_ta (int8_t src, size_t vl) { + return vmv_v_x_i8m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmv_v_v_i16mf4_ta (vint16mf4_t src, size_t vl) { + return vmv_v_v_i16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmv_v_x_i16mf4_ta (int16_t src, size_t vl) { + return vmv_v_x_i16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmv_v_v_i16mf2_ta (vint16mf2_t src, size_t vl) { + return vmv_v_v_i16mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmv_v_x_i16mf2_ta (int16_t src, size_t vl) { + return vmv_v_x_i16mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m1_ta( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmv_v_v_i16m1_ta (vint16m1_t src, size_t vl) { + return vmv_v_v_i16m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmv_v_x_i16m1_ta (int16_t src, size_t vl) { + return vmv_v_x_i16m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmv_v_v_i16m2_ta (vint16m2_t src, size_t vl) { + return vmv_v_v_i16m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmv_v_x_i16m2_ta (int16_t src, size_t vl) { + return vmv_v_x_i16m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmv_v_v_i16m4_ta (vint16m4_t src, size_t vl) { + return vmv_v_v_i16m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmv_v_x_i16m4_ta (int16_t src, size_t vl) { + return vmv_v_x_i16m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmv_v_v_i16m8_ta (vint16m8_t src, size_t vl) { + return vmv_v_v_i16m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmv_v_x_i16m8_ta (int16_t src, size_t vl) { + return vmv_v_x_i16m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmv_v_v_i32mf2_ta (vint32mf2_t src, size_t vl) { + return vmv_v_v_i32mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmv_v_x_i32mf2_ta (int32_t src, size_t vl) { + return vmv_v_x_i32mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmv_v_v_i32m1_ta (vint32m1_t src, size_t vl) { + return 
vmv_v_v_i32m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmv_v_x_i32m1_ta (int32_t src, size_t vl) { + return vmv_v_x_i32m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmv_v_v_i32m2_ta (vint32m2_t src, size_t vl) { + return vmv_v_v_i32m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmv_v_x_i32m2_ta (int32_t src, size_t vl) { + return vmv_v_x_i32m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmv_v_v_i32m4_ta (vint32m4_t src, size_t vl) { + return vmv_v_v_i32m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmv_v_x_i32m4_ta (int32_t src, size_t vl) { + return vmv_v_x_i32m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmv_v_v_i32m8_ta (vint32m8_t src, size_t vl) { + return vmv_v_v_i32m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmv_v_x_i32m8_ta (int32_t src, size_t vl) { + return vmv_v_x_i32m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmv_v_v_i64m1_ta (vint64m1_t src, size_t vl) { + return vmv_v_v_i64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmv_v_x_i64m1_ta (int64_t src, size_t vl) { + return vmv_v_x_i64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmv_v_v_i64m2_ta (vint64m2_t src, size_t vl) { + return vmv_v_v_i64m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint64m2_t test_vmv_v_x_i64m2_ta (int64_t src, size_t vl) { + return vmv_v_x_i64m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmv_v_v_i64m4_ta (vint64m4_t src, size_t vl) { + return vmv_v_v_i64m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmv_v_x_i64m4_ta (int64_t src, size_t vl) { + return vmv_v_x_i64m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_i64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmv_v_v_i64m8_ta (vint64m8_t src, size_t vl) { + return vmv_v_v_i64m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_i64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmv_v_x_i64m8_ta (int64_t src, size_t vl) { + return vmv_v_x_i64m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmv_v_v_u8mf8_ta (vuint8mf8_t src, size_t vl) { + return vmv_v_v_u8mf8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmv_v_x_u8mf8_ta (uint8_t src, size_t vl) { + return vmv_v_x_u8mf8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmv_v_v_u8mf4_ta (vuint8mf4_t src, size_t vl) { + return vmv_v_v_u8mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmv_v_x_u8mf4_ta (uint8_t src, size_t vl) { + return vmv_v_x_u8mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmv_v_v_u8mf2_ta (vuint8mf2_t src, size_t vl) { + return vmv_v_v_u8mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmv_v_x_u8mf2_ta (uint8_t src, size_t vl) { + return vmv_v_x_u8mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i8.i64( poison, [[SRC:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmv_v_v_u8m1_ta (vuint8m1_t src, size_t vl) { + return vmv_v_v_u8m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmv_v_x_u8m1_ta (uint8_t src, size_t vl) { + return vmv_v_x_u8m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmv_v_v_u8m2_ta (vuint8m2_t src, size_t vl) { + return vmv_v_v_u8m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmv_v_x_u8m2_ta (uint8_t src, size_t vl) { + return vmv_v_x_u8m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmv_v_v_u8m4_ta (vuint8m4_t src, size_t vl) { + return vmv_v_v_u8m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmv_v_x_u8m4_ta (uint8_t src, size_t vl) { + return vmv_v_x_u8m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv64i8.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmv_v_v_u8m8_ta (vuint8m8_t src, size_t vl) { + return vmv_v_v_u8m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv64i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmv_v_x_u8m8_ta (uint8_t src, size_t vl) { + return vmv_v_x_u8m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmv_v_v_u16mf4_ta (vuint16mf4_t src, size_t vl) { + return vmv_v_v_u16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmv_v_x_u16mf4_ta (uint16_t src, size_t vl) { + return vmv_v_x_u16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmv_v_v_u16mf2_ta (vuint16mf2_t src, size_t vl) { + return vmv_v_v_u16mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmv.v.x.nxv2i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmv_v_x_u16mf2_ta (uint16_t src, size_t vl) { + return vmv_v_x_u16mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmv_v_v_u16m1_ta (vuint16m1_t src, size_t vl) { + return vmv_v_v_u16m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmv_v_x_u16m1_ta (uint16_t src, size_t vl) { + return vmv_v_x_u16m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmv_v_v_u16m2_ta (vuint16m2_t src, size_t vl) { + return vmv_v_v_u16m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmv_v_x_u16m2_ta (uint16_t src, size_t vl) { + return vmv_v_x_u16m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmv_v_v_u16m4_ta (vuint16m4_t src, size_t vl) { + return vmv_v_v_u16m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmv_v_x_u16m4_ta (uint16_t src, size_t vl) { + return vmv_v_x_u16m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmv_v_v_u16m8_ta (vuint16m8_t src, size_t vl) { + return vmv_v_v_u16m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv32i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmv_v_x_u16m8_ta (uint16_t src, size_t vl) { + return vmv_v_x_u16m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmv_v_v_u32mf2_ta (vuint32mf2_t src, size_t vl) { + return vmv_v_v_u32mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmv_v_x_u32mf2_ta (uint32_t src, size_t vl) { + return vmv_v_x_u32mf2_ta(src, vl); +} + +// 
CHECK-RV64-LABEL: @test_vmv_v_v_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmv_v_v_u32m1_ta (vuint32m1_t src, size_t vl) { + return vmv_v_v_u32m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmv_v_x_u32m1_ta (uint32_t src, size_t vl) { + return vmv_v_x_u32m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmv_v_v_u32m2_ta (vuint32m2_t src, size_t vl) { + return vmv_v_v_u32m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmv_v_x_u32m2_ta (uint32_t src, size_t vl) { + return vmv_v_x_u32m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmv_v_v_u32m4_ta (vuint32m4_t src, size_t vl) { + return vmv_v_v_u32m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmv_v_x_u32m4_ta (uint32_t src, size_t vl) { + return vmv_v_x_u32m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmv_v_v_u32m8_ta (vuint32m8_t src, size_t vl) { + return vmv_v_v_u32m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv16i32.i64( poison, i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmv_v_x_u32m8_ta (uint32_t src, size_t vl) { + return vmv_v_x_u32m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmv_v_v_u64m1_ta (vuint64m1_t src, size_t vl) { + return vmv_v_v_u64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv1i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmv_v_x_u64m1_ta (uint64_t src, size_t vl) { + return vmv_v_x_u64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t 
test_vmv_v_v_u64m2_ta (vuint64m2_t src, size_t vl) { + return vmv_v_v_u64m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv2i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmv_v_x_u64m2_ta (uint64_t src, size_t vl) { + return vmv_v_x_u64m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmv_v_v_u64m4_ta (vuint64m4_t src, size_t vl) { + return vmv_v_v_u64m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv4i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vmv_v_x_u64m4_ta (uint64_t src, size_t vl) { + return vmv_v_x_u64m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmv_v_v_u64m8_ta (vuint64m8_t src, size_t vl) { + return vmv_v_v_u64m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_x_u64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.x.nxv8i64.i64( poison, i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmv_v_x_u64m8_ta (uint64_t src, size_t vl) { + return vmv_v_x_u64m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmv_v_v_f16mf4_tu (vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return vmv_v_v_f16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmv_v_v_f16mf2_tu (vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return vmv_v_v_f16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmv_v_v_f16m1_tu (vfloat16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return vmv_v_v_f16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmv_v_v_f16m2_tu (vfloat16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return vmv_v_v_f16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmv_v_v_f16m4_tu 
(vfloat16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return vmv_v_v_f16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmv_v_v_f16m8_tu (vfloat16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return vmv_v_v_f16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vmv_v_v_f32mf2_tu (vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return vmv_v_v_f32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vmv_v_v_f32m1_tu (vfloat32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return vmv_v_v_f32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vmv_v_v_f32m2_tu (vfloat32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return vmv_v_v_f32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vmv_v_v_f32m4_tu (vfloat32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return vmv_v_v_f32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vmv_v_v_f32m8_tu (vfloat32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return vmv_v_v_f32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vmv_v_v_f64m1_tu (vfloat64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return vmv_v_v_f64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vmv_v_v_f64m2_tu (vfloat64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return vmv_v_v_f64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vmv_v_v_f64m4_tu (vfloat64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return vmv_v_v_f64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vmv_v_v_f64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vmv_v_v_f64m8_tu (vfloat64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return vmv_v_v_f64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vmv_v_v_f16mf4_ta (vfloat16mf4_t src, size_t vl) { + return vmv_v_v_f16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vmv_v_v_f16mf2_ta (vfloat16mf2_t src, size_t vl) { + return vmv_v_v_f16mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vmv_v_v_f16m1_ta (vfloat16m1_t src, size_t vl) { + return vmv_v_v_f16m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vmv_v_v_f16m2_ta (vfloat16m2_t src, size_t vl) { + return vmv_v_v_f16m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vmv_v_v_f16m4_ta (vfloat16m4_t src, size_t vl) { + return vmv_v_v_f16m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f16m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m8_t test_vmv_v_v_f16m8_ta (vfloat16m8_t src, size_t vl) { + return vmv_v_v_f16m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vmv_v_v_f32mf2_ta (vfloat32mf2_t src, size_t vl) { + return vmv_v_v_f32mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vmv_v_v_f32m1_ta (vfloat32m1_t src, size_t vl) { + return vmv_v_v_f32m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vmv_v_v_f32m2_ta (vfloat32m2_t src, size_t vl) { + return vmv_v_v_f32m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vmv_v_v_f32m4_ta (vfloat32m4_t src, size_t vl) { + return vmv_v_v_f32m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f32m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vmv_v_v_f32m8_ta (vfloat32m8_t src, size_t vl) { + return vmv_v_v_f32m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vmv_v_v_f64m1_ta (vfloat64m1_t src, size_t vl) { + return vmv_v_v_f64m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vmv_v_v_f64m2_ta (vfloat64m2_t src, size_t vl) { + return vmv_v_v_f64m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vmv_v_v_f64m4_ta (vfloat64m4_t src, size_t vl) { + return vmv_v_v_f64m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_v_v_f64m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.v.v.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vmv_v_v_f64m8_ta (vfloat64m8_t src, size_t vl) { + return vmv_v_v_f64m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmv_s_x_i8mf8_tu (vint8mf8_t maskedoff, int8_t src, size_t vl) { + return vmv_s_x_i8mf8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmv_s_x_i8mf4_tu (vint8mf4_t maskedoff, int8_t src, size_t vl) { + return vmv_s_x_i8mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmv_s_x_i8mf2_tu (vint8mf2_t maskedoff, int8_t src, size_t vl) { + return vmv_s_x_i8mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmv_s_x_i8m1_tu (vint8m1_t maskedoff, int8_t src, size_t vl) { + return vmv_s_x_i8m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmv_s_x_i8m2_tu 
(vint8m2_t maskedoff, int8_t src, size_t vl) { + return vmv_s_x_i8m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmv_s_x_i8m4_tu (vint8m4_t maskedoff, int8_t src, size_t vl) { + return vmv_s_x_i8m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmv_s_x_i8m8_tu (vint8m8_t maskedoff, int8_t src, size_t vl) { + return vmv_s_x_i8m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmv_s_x_i16mf4_tu (vint16mf4_t maskedoff, int16_t src, size_t vl) { + return vmv_s_x_i16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmv_s_x_i16mf2_tu (vint16mf2_t maskedoff, int16_t src, size_t vl) { + return vmv_s_x_i16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vmv_s_x_i16m1_tu (vint16m1_t maskedoff, int16_t src, size_t vl) { + return vmv_s_x_i16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vmv_s_x_i16m2_tu (vint16m2_t maskedoff, int16_t src, size_t vl) { + return vmv_s_x_i16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vmv_s_x_i16m4_tu (vint16m4_t maskedoff, int16_t src, size_t vl) { + return vmv_s_x_i16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vmv_s_x_i16m8_tu (vint16m8_t maskedoff, int16_t src, size_t vl) { + return vmv_s_x_i16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vmv_s_x_i32mf2_tu (vint32mf2_t maskedoff, int32_t src, size_t vl) { + return vmv_s_x_i32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vmv_s_x_i32m1_tu (vint32m1_t maskedoff, int32_t src, size_t vl) { + return vmv_s_x_i32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vmv_s_x_i32m2_tu (vint32m2_t maskedoff, int32_t src, size_t vl) { + return vmv_s_x_i32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vmv_s_x_i32m4_tu (vint32m4_t maskedoff, int32_t src, size_t vl) { + return vmv_s_x_i32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vmv_s_x_i32m8_tu (vint32m8_t maskedoff, int32_t src, size_t vl) { + return vmv_s_x_i32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vmv_s_x_i64m1_tu (vint64m1_t maskedoff, int64_t src, size_t vl) { + return vmv_s_x_i64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vmv_s_x_i64m2_tu (vint64m2_t maskedoff, int64_t src, size_t vl) { + return vmv_s_x_i64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vmv_s_x_i64m4_tu (vint64m4_t maskedoff, int64_t src, size_t vl) { + return vmv_s_x_i64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vmv_s_x_i64m8_tu (vint64m8_t maskedoff, int64_t src, size_t vl) { + return vmv_s_x_i64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vmv_s_x_u8mf8_tu (vuint8mf8_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_x_u8mf8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vmv_s_x_u8mf4_tu (vuint8mf4_t 
maskedoff, uint8_t src, size_t vl) { + return vmv_s_x_u8mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vmv_s_x_u8mf2_tu (vuint8mf2_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_x_u8mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vmv_s_x_u8m1_tu (vuint8m1_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_x_u8m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vmv_s_x_u8m2_tu (vuint8m2_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_x_u8m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vmv_s_x_u8m4_tu (vuint8m4_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_x_u8m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u8m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( [[MASKEDOFF:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m8_t test_vmv_s_x_u8m8_tu (vuint8m8_t maskedoff, uint8_t src, size_t vl) { + return vmv_s_x_u8m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vmv_s_x_u16mf4_tu (vuint16mf4_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_x_u16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vmv_s_x_u16mf2_tu (vuint16mf2_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_x_u16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vmv_s_x_u16m1_tu (vuint16m1_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_x_u16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vmv_s_x_u16m2_tu (vuint16m2_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_x_u16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vmv_s_x_u16m4_tu (vuint16m4_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_x_u16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i16.i64( [[MASKEDOFF:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vmv_s_x_u16m8_tu (vuint16m8_t maskedoff, uint16_t src, size_t vl) { + return vmv_s_x_u16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vmv_s_x_u32mf2_tu (vuint32mf2_t maskedoff, uint32_t src, size_t vl) { + return vmv_s_x_u32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vmv_s_x_u32m1_tu (vuint32m1_t maskedoff, uint32_t src, size_t vl) { + return vmv_s_x_u32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vmv_s_x_u32m2_tu (vuint32m2_t maskedoff, uint32_t src, size_t vl) { + return vmv_s_x_u32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vmv_s_x_u32m4_tu (vuint32m4_t maskedoff, uint32_t src, size_t vl) { + return vmv_s_x_u32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i32.i64( [[MASKEDOFF:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vmv_s_x_u32m8_tu (vuint32m8_t maskedoff, uint32_t src, size_t vl) { + return vmv_s_x_u32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vmv_s_x_u64m1_tu (vuint64m1_t maskedoff, uint64_t src, size_t vl) { + return vmv_s_x_u64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vmv_s_x_u64m2_tu (vuint64m2_t maskedoff, uint64_t src, size_t vl) { + return vmv_s_x_u64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t 
test_vmv_s_x_u64m4_tu (vuint64m4_t maskedoff, uint64_t src, size_t vl) { + return vmv_s_x_u64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i64.i64( [[MASKEDOFF:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vmv_s_x_u64m8_tu (vuint64m8_t maskedoff, uint64_t src, size_t vl) { + return vmv_s_x_u64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vmv_s_x_i8mf8_ta (int8_t src, size_t vl) { + return vmv_s_x_i8mf8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vmv_s_x_i8mf4_ta (int8_t src, size_t vl) { + return vmv_s_x_i8mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv4i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vmv_s_x_i8mf2_ta (int8_t src, size_t vl) { + return vmv_s_x_i8mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv8i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vmv_s_x_i8m1_ta (int8_t src, size_t vl) { + return vmv_s_x_i8m1_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv16i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vmv_s_x_i8m2_ta (int8_t src, size_t vl) { + return vmv_s_x_i8m2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv32i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vmv_s_x_i8m4_ta (int8_t src, size_t vl) { + return vmv_s_x_i8m4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i8m8_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv64i8.i64( poison, i8 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vmv_s_x_i8m8_ta (int8_t src, size_t vl) { + return vmv_s_x_i8m8_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv1i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vmv_s_x_i16mf4_ta (int16_t src, size_t vl) { + return vmv_s_x_i16mf4_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmv.s.x.nxv2i16.i64( poison, i16 [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vmv_s_x_i16mf2_ta (int16_t src, size_t vl) { + return vmv_s_x_i16mf2_ta(src, vl); +} + +// CHECK-RV64-LABEL: @test_vmv_s_x_i16m1_ta( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vmv_s_x_i16m1_ta (int16_t src, size_t vl) {
+  return vmv_s_x_i16m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vmv_s_x_i16m2_ta (int16_t src, size_t vl) {
+  return vmv_s_x_i16m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vmv_s_x_i16m4_ta (int16_t src, size_t vl) {
+  return vmv_s_x_i16m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vmv_s_x_i16m8_ta (int16_t src, size_t vl) {
+  return vmv_s_x_i16m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vmv_s_x_i32mf2_ta (int32_t src, size_t vl) {
+  return vmv_s_x_i32mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vmv_s_x_i32m1_ta (int32_t src, size_t vl) {
+  return vmv_s_x_i32m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vmv_s_x_i32m2_ta (int32_t src, size_t vl) {
+  return vmv_s_x_i32m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vmv_s_x_i32m4_ta (int32_t src, size_t vl) {
+  return vmv_s_x_i32m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vmv_s_x_i32m8_ta (int32_t src, size_t vl) {
+  return vmv_s_x_i32m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vmv_s_x_i64m1_ta (int64_t src, size_t vl) {
+  return vmv_s_x_i64m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vmv_s_x_i64m2_ta (int64_t src, size_t vl) {
+  return vmv_s_x_i64m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vmv_s_x_i64m4_ta (int64_t src, size_t vl) {
+  return vmv_s_x_i64m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_i64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vmv_s_x_i64m8_ta (int64_t src, size_t vl) {
+  return vmv_s_x_i64m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vmv_s_x_u8mf8_ta (uint8_t src, size_t vl) {
+  return vmv_s_x_u8mf8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vmv_s_x_u8mf4_ta (uint8_t src, size_t vl) {
+  return vmv_s_x_u8mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vmv_s_x_u8mf2_ta (uint8_t src, size_t vl) {
+  return vmv_s_x_u8mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u8m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vmv_s_x_u8m1_ta (uint8_t src, size_t vl) {
+  return vmv_s_x_u8m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u8m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vmv_s_x_u8m2_ta (uint8_t src, size_t vl) {
+  return vmv_s_x_u8m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u8m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vmv_s_x_u8m4_ta (uint8_t src, size_t vl) {
+  return vmv_s_x_u8m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u8m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vmv_s_x_u8m8_ta (uint8_t src, size_t vl) {
+  return vmv_s_x_u8m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vmv_s_x_u16mf4_ta (uint16_t src, size_t vl) {
+  return vmv_s_x_u16mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vmv_s_x_u16mf2_ta (uint16_t src, size_t vl) {
+  return vmv_s_x_u16mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u16m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vmv_s_x_u16m1_ta (uint16_t src, size_t vl) {
+  return vmv_s_x_u16m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u16m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vmv_s_x_u16m2_ta (uint16_t src, size_t vl) {
+  return vmv_s_x_u16m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u16m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vmv_s_x_u16m4_ta (uint16_t src, size_t vl) {
+  return vmv_s_x_u16m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u16m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vmv_s_x_u16m8_ta (uint16_t src, size_t vl) {
+  return vmv_s_x_u16m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vmv_s_x_u32mf2_ta (uint32_t src, size_t vl) {
+  return vmv_s_x_u32mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u32m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vmv_s_x_u32m1_ta (uint32_t src, size_t vl) {
+  return vmv_s_x_u32m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u32m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vmv_s_x_u32m2_ta (uint32_t src, size_t vl) {
+  return vmv_s_x_u32m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u32m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vmv_s_x_u32m4_ta (uint32_t src, size_t vl) {
+  return vmv_s_x_u32m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u32m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vmv_s_x_u32m8_ta (uint32_t src, size_t vl) {
+  return vmv_s_x_u32m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u64m1_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vmv_s_x_u64m1_ta (uint64_t src, size_t vl) {
+  return vmv_s_x_u64m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u64m2_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vmv_s_x_u64m2_ta (uint64_t src, size_t vl) {
+  return vmv_s_x_u64m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u64m4_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vmv_s_x_u64m4_ta (uint64_t src, size_t vl) {
+  return vmv_s_x_u64m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmv_s_x_u64m8_ta(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vmv_s_x_u64m8_ta (uint64_t src, size_t vl) {
+  return vmv_s_x_u64m8_ta(src, vl);
+}