diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt.c
@@ -16,15 +16,6 @@
   return __riscv_vfcvt_x_f_v_i16mf4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -34,15 +25,6 @@
   return __riscv_vfcvt_x_f_v_i16mf2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -52,15 +34,6 @@
   return __riscv_vfcvt_x_f_v_i16m1(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m1(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -70,15 +43,6 @@
   return __riscv_vfcvt_x_f_v_i16m2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -88,15 +52,6 @@
   return __riscv_vfcvt_x_f_v_i16m4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -106,15 +61,6 @@
return __riscv_vfcvt_x_f_v_i16m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m8(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -124,15 +70,6 @@ return __riscv_vfcvt_xu_f_v_u16mf4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf4(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -142,15 +79,6 @@ return __riscv_vfcvt_xu_f_v_u16mf2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -160,15 +88,6 @@ return __riscv_vfcvt_xu_f_v_u16m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m1(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -178,15 +97,6 @@ return __riscv_vfcvt_xu_f_v_u16m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -196,15 +106,6 @@ return __riscv_vfcvt_xu_f_v_u16m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t 
vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m4(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -214,15 +115,6 @@ return __riscv_vfcvt_xu_f_v_u16m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m8(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -340,15 +232,6 @@ return __riscv_vfcvt_x_f_v_i32mf2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32mf2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -358,15 +241,6 @@ return __riscv_vfcvt_x_f_v_i32m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m1(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -376,15 +250,6 @@ return __riscv_vfcvt_x_f_v_i32m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -394,15 +259,6 @@ return __riscv_vfcvt_x_f_v_i32m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m4(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -412,15 +268,6 @@ return __riscv_vfcvt_x_f_v_i32m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m8(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -430,15 +277,6 @@ return __riscv_vfcvt_xu_f_v_u32mf2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32mf2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -448,15 +286,6 @@ return __riscv_vfcvt_xu_f_v_u32m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m1(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -466,15 +295,6 @@ return __riscv_vfcvt_xu_f_v_u32m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -484,15 +304,6 @@ return __riscv_vfcvt_xu_f_v_u32m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m4(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -502,15 +313,6 @@ return __riscv_vfcvt_xu_f_v_u32m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m8(src, vl); -} - // CHECK-RV64-LABEL: 
@test_vfcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -610,15 +412,6 @@ return __riscv_vfcvt_x_f_v_i64m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m1(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -628,15 +421,6 @@ return __riscv_vfcvt_x_f_v_i64m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -646,15 +430,6 @@ return __riscv_vfcvt_x_f_v_i64m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m4(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -664,15 +439,6 @@ return __riscv_vfcvt_x_f_v_i64m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m8(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -682,15 +448,6 @@ return __riscv_vfcvt_xu_f_v_u64m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m1(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -700,15 +457,6 @@ return __riscv_vfcvt_xu_f_v_u64m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -718,15 +466,6 @@ return __riscv_vfcvt_xu_f_v_u64m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m4(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -736,15 +475,6 @@ return __riscv_vfcvt_xu_f_v_u64m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m8(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -826,15 +556,6 @@ return __riscv_vfcvt_x_f_v_i16mf4_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -844,15 +565,6 @@ return __riscv_vfcvt_x_f_v_i16mf2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -862,15 +574,6 @@ return __riscv_vfcvt_x_f_v_i16m1_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t 
test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m1_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -880,15 +583,6 @@ return __riscv_vfcvt_x_f_v_i16m2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -898,15 +592,6 @@ return __riscv_vfcvt_x_f_v_i16m4_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -916,15 +601,6 @@ return __riscv_vfcvt_x_f_v_i16m8_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m8_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -934,15 +610,6 @@ return __riscv_vfcvt_xu_f_v_u16mf4_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -952,15 +619,6 @@ return __riscv_vfcvt_xu_f_v_u16mf2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -970,15 +628,6 @@ return __riscv_vfcvt_xu_f_v_u16m1_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m1_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -988,15 +637,6 @@ return __riscv_vfcvt_xu_f_v_u16m2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1006,15 +646,6 @@ return __riscv_vfcvt_xu_f_v_u16m4_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1024,15 +655,6 @@ return __riscv_vfcvt_xu_f_v_u16m8_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m8_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1150,15 +772,6 @@ return __riscv_vfcvt_x_f_v_i32mf2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1168,15 +781,6 @@ return __riscv_vfcvt_x_f_v_i32m1_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m1_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1186,15 +790,6 @@ return __riscv_vfcvt_x_f_v_i32m2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1204,15 +799,6 @@ return __riscv_vfcvt_x_f_v_i32m4_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1222,15 +808,6 @@ return __riscv_vfcvt_x_f_v_i32m8_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m8_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1240,15 +817,6 @@ return __riscv_vfcvt_xu_f_v_u32mf2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1258,15 +826,6 @@ return __riscv_vfcvt_xu_f_v_u32m1_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1276,15 +835,6 @@ return __riscv_vfcvt_xu_f_v_u32m2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1294,15 +844,6 @@ return __riscv_vfcvt_xu_f_v_u32m4_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1312,15 +853,6 @@ return __riscv_vfcvt_xu_f_v_u32m8_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1420,15 +952,6 @@ return 
__riscv_vfcvt_x_f_v_i64m1_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m1_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1438,15 +961,6 @@ return __riscv_vfcvt_x_f_v_i64m2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1456,15 +970,6 @@ return __riscv_vfcvt_x_f_v_i64m4_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1474,15 +979,6 @@ return __riscv_vfcvt_x_f_v_i64m8_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m8_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1492,15 +988,6 @@ return __riscv_vfcvt_xu_f_v_u64m1_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1510,15 +997,6 @@
   return __riscv_vfcvt_xu_f_v_u64m2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1528,15 +1006,6 @@
   return __riscv_vfcvt_xu_f_v_u64m4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1546,15 +1015,6 @@
   return __riscv_vfcvt_xu_f_v_u64m8_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m8_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt_rtz.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfcvt_rtz.c
@@ -0,0 +1,549 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
+  return __riscv_vfcvt_rtz_x_f_v_i16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret
[[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t 
test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m8_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m8_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return 
+ return __riscv_vfcvt_rtz_x_f_v_i32m8_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu_f_v_u32m1_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu_f_v_u32m2_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu_f_v_u32m4_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu_f_v_u32m8_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x_f_v_i64m1_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x_f_v_i64m2_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x_f_v_i64m4_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x_f_v_i64m8_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu_f_v_u64m1_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu_f_v_u64m2_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu_f_v_u64m4_m(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu_f_v_u64m8_m(mask, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt.c
@@ -16,15 +16,6 @@
 return __riscv_vfncvt_x_f_w_i8mf8(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8mf8(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -34,15 +25,6 @@
 return __riscv_vfncvt_x_f_w_i8mf4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8mf4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -52,15 +34,6 @@
 return __riscv_vfncvt_x_f_w_i8mf2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8mf2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -70,15 +43,6 @@
 return __riscv_vfncvt_x_f_w_i8m1(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8m1(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -88,15 +52,6 @@
 return __riscv_vfncvt_x_f_w_i8m2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8m2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -106,15 +61,6 @@
 return __riscv_vfncvt_x_f_w_i8m4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8m4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -124,15 +70,6 @@
 return __riscv_vfncvt_xu_f_w_u8mf8(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8mf8(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -142,15 +79,6 @@
 return __riscv_vfncvt_xu_f_w_u8mf4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8mf4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -160,15 +88,6 @@
 return __riscv_vfncvt_xu_f_w_u8mf2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8mf2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -178,15 +97,6 @@
 return __riscv_vfncvt_xu_f_w_u8m1(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8m1(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -196,15 +106,6 @@
 return __riscv_vfncvt_xu_f_w_u8m2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8m2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -214,15 +115,6 @@
 return __riscv_vfncvt_xu_f_w_u8m4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8m4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -232,15 +124,6 @@
 return __riscv_vfncvt_x_f_w_i16mf4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i16mf4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -250,15 +133,6 @@
 return __riscv_vfncvt_x_f_w_i16mf2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i16mf2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -268,15 +142,6 @@
 return __riscv_vfncvt_x_f_w_i16m1(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i16m1(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -286,15 +151,6 @@
 return __riscv_vfncvt_x_f_w_i16m2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i16m2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -304,15 +160,6 @@
 return __riscv_vfncvt_x_f_w_i16m4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i16m4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -322,15 +169,6 @@
 return __riscv_vfncvt_xu_f_w_u16mf4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u16mf4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -340,15 +178,6 @@
 return __riscv_vfncvt_xu_f_w_u16mf2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u16mf2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -358,15 +187,6 @@
 return __riscv_vfncvt_xu_f_w_u16m1(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u16m1(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -376,15 +196,6 @@
 return __riscv_vfncvt_xu_f_w_u16m2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u16m2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -394,15 +205,6 @@
 return __riscv_vfncvt_xu_f_w_u16m4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u16m4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -502,15 +304,6 @@
 return __riscv_vfncvt_f_f_w_f16mf4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f16mf4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -520,15 +313,6 @@
 return __riscv_vfncvt_f_f_w_f16mf2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f16mf2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -538,15 +322,6 @@
 return __riscv_vfncvt_f_f_w_f16m1(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f16m1(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -556,15 +331,6 @@
 return __riscv_vfncvt_f_f_w_f16m2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f16m2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -574,15 +340,6 @@
 return __riscv_vfncvt_f_f_w_f16m4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f16m4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -592,15 +349,6 @@
 return __riscv_vfncvt_x_f_w_i32mf2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i32mf2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -610,15 +358,6 @@
 return __riscv_vfncvt_x_f_w_i32m1(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i32m1(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -628,15 +367,6 @@
 return __riscv_vfncvt_x_f_w_i32m2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i32m2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -646,15 +376,6 @@
 return __riscv_vfncvt_x_f_w_i32m4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i32m4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -664,15 +385,6 @@
 return __riscv_vfncvt_xu_f_w_u32mf2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u32mf2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -682,15 +394,6 @@
 return __riscv_vfncvt_xu_f_w_u32m1(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u32m1(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -700,15 +403,6 @@
 return __riscv_vfncvt_xu_f_w_u32m2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u32m2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -718,15 +412,6 @@
 return __riscv_vfncvt_xu_f_w_u32m4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u32m4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -808,15 +493,6 @@
 return __riscv_vfncvt_f_f_w_f32mf2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f32mf2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -826,15 +502,6 @@
 return __riscv_vfncvt_f_f_w_f32m1(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f32m1(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -844,15 +511,6 @@
 return __riscv_vfncvt_f_f_w_f32m2(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f32m2(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -862,15 +520,6 @@
 return __riscv_vfncvt_f_f_w_f32m4(src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f32m4(src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -880,15 +529,6 @@
 return __riscv_vfncvt_x_f_w_i8mf8_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8mf8_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -898,15 +538,6 @@
 return __riscv_vfncvt_x_f_w_i8mf4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8mf4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -916,15 +547,6 @@
 return __riscv_vfncvt_x_f_w_i8mf2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8mf2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -934,15 +556,6 @@
 return __riscv_vfncvt_x_f_w_i8m1_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8m1_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -952,15 +565,6 @@
 return __riscv_vfncvt_x_f_w_i8m2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8m2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -970,15 +574,6 @@
 return __riscv_vfncvt_x_f_w_i8m4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i8m4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -988,15 +583,6 @@
 return __riscv_vfncvt_xu_f_w_u8mf8_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8mf8_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1006,15 +592,6 @@
 return __riscv_vfncvt_xu_f_w_u8mf4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8mf4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1024,15 +601,6 @@
 return __riscv_vfncvt_xu_f_w_u8mf2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8mf2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1042,15 +610,6 @@
 return __riscv_vfncvt_xu_f_w_u8m1_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8m1_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1060,15 +619,6 @@
 return __riscv_vfncvt_xu_f_w_u8m2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8m2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1078,15 +628,6 @@
 return __riscv_vfncvt_xu_f_w_u8m4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u8m4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1096,15 +637,6 @@
 return __riscv_vfncvt_x_f_w_i16mf4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i16mf4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1114,15 +646,6 @@
 return __riscv_vfncvt_x_f_w_i16mf2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i16mf2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1132,15 +655,6 @@
 return __riscv_vfncvt_x_f_w_i16m1_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i16m1_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1150,15 +664,6 @@
 return __riscv_vfncvt_x_f_w_i16m2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i16m2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1168,15 +673,6 @@
 return __riscv_vfncvt_x_f_w_i16m4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i16m4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1186,15 +682,6 @@
 return __riscv_vfncvt_xu_f_w_u16mf4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u16mf4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1204,15 +691,6 @@
 return __riscv_vfncvt_xu_f_w_u16mf2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u16mf2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1222,15 +700,6 @@
 return __riscv_vfncvt_xu_f_w_u16m1_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u16m1_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1240,15 +709,6 @@
 return __riscv_vfncvt_xu_f_w_u16m2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u16m2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1258,15 +718,6 @@
 return __riscv_vfncvt_xu_f_w_u16m4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u16m4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1366,15 +817,6 @@
 return __riscv_vfncvt_f_f_w_f16mf4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f16mf4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1384,15 +826,6 @@
 return __riscv_vfncvt_f_f_w_f16mf2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f16mf2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1402,15 +835,6 @@
 return __riscv_vfncvt_f_f_w_f16m1_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f16m1_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1420,15 +844,6 @@
 return __riscv_vfncvt_f_f_w_f16m2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f16m2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1438,15 +853,6 @@
 return __riscv_vfncvt_f_f_w_f16m4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f16m4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1456,15 +862,6 @@
 return __riscv_vfncvt_x_f_w_i32mf2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i32mf2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1474,15 +871,6 @@
 return __riscv_vfncvt_x_f_w_i32m1_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i32m1_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1492,15 +880,6 @@
 return __riscv_vfncvt_x_f_w_i32m2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i32m2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1510,15 +889,6 @@
 return __riscv_vfncvt_x_f_w_i32m4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x_f_w_i32m4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1528,15 +898,6 @@
 return __riscv_vfncvt_xu_f_w_u32mf2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u32mf2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1546,15 +907,6 @@
 return __riscv_vfncvt_xu_f_w_u32m1_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u32m1_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1564,15 +916,6 @@
 return __riscv_vfncvt_xu_f_w_u32m2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u32m2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1582,15 +925,6 @@
 return __riscv_vfncvt_xu_f_w_u32m4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_f_w_u32m4_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1672,15 +1006,6 @@
 return __riscv_vfncvt_f_f_w_f32mf2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f32mf2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1690,15 +1015,6 @@
 return __riscv_vfncvt_f_f_w_f32m1_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f32m1_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1708,15 +1024,6 @@
 return __riscv_vfncvt_f_f_w_f32m2_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f32m2_m(mask, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1726,12 +1033,3 @@
 return __riscv_vfncvt_f_f_w_f32m4_m(mask, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_f_w_f32m4_m(mask, src, vl);
-}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rod.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rod.c
@@ -0,0 +1,171 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4_m(mask, 
src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4_m(mask, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rtz.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rtz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfncvt_rtz.c @@ -0,0 +1,549 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { + return
__riscv_vfncvt_rtz_xu_f_w_u8mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t 
test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf8_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t 
test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return 
__riscv_vfncvt_rtz_xu_f_w_u16m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m4_m(mask, src, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt.c @@ -124,15 +124,6 @@ return __riscv_vfwcvt_x_f_v_i32mf2(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32mf2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -142,15 +133,6 @@ return __riscv_vfwcvt_x_f_v_i32m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m1(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -160,15 +142,6 @@ return __riscv_vfwcvt_x_f_v_i32m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -178,15 +151,6 @@ return __riscv_vfwcvt_x_f_v_i32m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m4(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -196,15 +160,6 @@ return __riscv_vfwcvt_x_f_v_i32m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m8(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], i64 
[[VL:%.*]]) @@ -214,15 +169,6 @@ return __riscv_vfwcvt_xu_f_v_u32mf2(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32mf2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -232,15 +178,6 @@ return __riscv_vfwcvt_xu_f_v_u32m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m1(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -250,15 +187,6 @@ return __riscv_vfwcvt_xu_f_v_u32m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -268,15 +196,6 @@ return __riscv_vfwcvt_xu_f_v_u32m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m4(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -286,15 +205,6 @@ return __riscv_vfwcvt_xu_f_v_u32m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m8(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -439,15 +349,6 @@ return __riscv_vfwcvt_x_f_v_i64m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// 
-vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m1(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -457,15 +358,6 @@ return __riscv_vfwcvt_x_f_v_i64m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -475,15 +367,6 @@ return __riscv_vfwcvt_x_f_v_i64m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m4(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -493,15 +376,6 @@ return __riscv_vfwcvt_x_f_v_i64m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m8(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -511,15 +385,6 @@ return __riscv_vfwcvt_xu_f_v_u64m1(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m1(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -529,15 +394,6 @@ return __riscv_vfwcvt_xu_f_v_u64m2(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m2(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -547,15 +403,6 @@ return 
__riscv_vfwcvt_xu_f_v_u64m4(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m4(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -565,15 +412,6 @@ return __riscv_vfwcvt_xu_f_v_u64m8(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m8(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -799,15 +637,6 @@ return __riscv_vfwcvt_x_f_v_i32mf2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -817,15 +646,6 @@ return __riscv_vfwcvt_x_f_v_i32m1_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m1_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -835,15 +655,6 @@ return __riscv_vfwcvt_x_f_v_i32m2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -853,15 +664,6 @@ return __riscv_vfwcvt_x_f_v_i32m4_m(mask, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfwcvt_rtz_x_f_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -871,15 +673,6 @@ return __riscv_vfwcvt_x_f_v_i32m8_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m8_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -889,15 +682,6 @@ return __riscv_vfwcvt_xu_f_v_u32mf2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -907,15 +691,6 @@ return __riscv_vfwcvt_xu_f_v_u32m1_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -925,15 +700,6 @@ return __riscv_vfwcvt_xu_f_v_u32m2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) @@ -943,15 +709,6 @@ return __riscv_vfwcvt_xu_f_v_u32m4_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -961,15 +718,6 @@ return __riscv_vfwcvt_xu_f_v_u32m8_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1114,15 +862,6 @@ return __riscv_vfwcvt_x_f_v_i64m1_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m1_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1132,15 +871,6 @@ return __riscv_vfwcvt_x_f_v_i64m2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1150,15 +880,6 @@ return __riscv_vfwcvt_x_f_v_i64m4_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1168,15 +889,6 @@ return __riscv_vfwcvt_x_f_v_i64m8_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m8_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1186,15 +898,6 @@ return __riscv_vfwcvt_xu_f_v_u64m1_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1204,15 +907,6 @@ return __riscv_vfwcvt_xu_f_v_u64m2_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m2_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1222,15 +916,6 @@ return __riscv_vfwcvt_xu_f_v_u64m4_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m4_m(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1240,15 +925,6 @@ return __riscv_vfwcvt_xu_f_v_u64m8_m(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m8_m(mask, src, vl); -} 
- // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt_rtz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwcvt_rtz.c @@ -0,0 +1,333 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t
__riscv_vfwcvt_rtz_xu_f_v_u32m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m1(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m2(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m4(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m8(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m8_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t 
src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m8_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfwcvt_rtz_xu_f_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_m(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_m(mask, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul_ext_v.c rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul_ext_v.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul_ext_v.c @@ -1222,1218 +1222,3 @@ return __riscv_vlmul_ext_v_u64m4_u64m8(op1); } -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv2f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) { - return __riscv_vlmul_trunc_v_f16mf2_f16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv4f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) { - return __riscv_vlmul_trunc_v_f16m1_f16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv4f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) { - return __riscv_vlmul_trunc_v_f16m1_f16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv8f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_v_f16m2_f16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv8f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t 
test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_v_f16m2_f16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv8f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_v_f16m2_f16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv16f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_v_f16m4_f16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv16f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_v_f16m4_f16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv16f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_v_f16m4_f16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv16f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_v_f16m4_f16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv32f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_v_f16m8_f16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv32f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_v_f16m8_f16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv32f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_v_f16m8_f16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv32f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_v_f16m8_f16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16f16.nxv32f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_v_f16m8_f16m4(op1); -} - -// CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_f32m1_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { - return __riscv_vlmul_trunc_v_f32m1_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { - return __riscv_vlmul_trunc_v_f32m2_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { - return __riscv_vlmul_trunc_v_f32m2_f32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_v_f32m4_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_v_f32m4_f32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_v_f32m4_f32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_v_f32m8_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_v_f32m8_f32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_v_f32m8_f32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_v_f32m8_f32m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( 
[[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { - return __riscv_vlmul_trunc_v_f64m2_f64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { - return __riscv_vlmul_trunc_v_f64m4_f64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { - return __riscv_vlmul_trunc_v_f64m4_f64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_v_f64m8_f64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_v_f64m8_f64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_v_f64m8_f64m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { - return __riscv_vlmul_trunc_v_i8mf4_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { - return __riscv_vlmul_trunc_v_i8mf2_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { - return __riscv_vlmul_trunc_v_i8mf2_i8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { - return __riscv_vlmul_trunc_v_i8m1_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { - return __riscv_vlmul_trunc_v_i8m1_i8mf4(op1); -} - -// CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_i8m1_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { - return __riscv_vlmul_trunc_v_i8m1_i8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { - return __riscv_vlmul_trunc_v_i8m2_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { - return __riscv_vlmul_trunc_v_i8m2_i8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { - return __riscv_vlmul_trunc_v_i8m2_i8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { - return __riscv_vlmul_trunc_v_i8m2_i8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { - return __riscv_vlmul_trunc_v_i8m4_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { - return __riscv_vlmul_trunc_v_i8m4_i8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { - return __riscv_vlmul_trunc_v_i8m4_i8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { - return __riscv_vlmul_trunc_v_i8m4_i8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { - return __riscv_vlmul_trunc_v_i8m4_i8m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { - return 
__riscv_vlmul_trunc_v_i8m8_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { - return __riscv_vlmul_trunc_v_i8m8_i8m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { - return __riscv_vlmul_trunc_v_i16mf2_i16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { - return __riscv_vlmul_trunc_v_i16m1_i16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { - return __riscv_vlmul_trunc_v_i16m1_i16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { - return __riscv_vlmul_trunc_v_i16m2_i16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { - return __riscv_vlmul_trunc_v_i16m2_i16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( 
[[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { - return __riscv_vlmul_trunc_v_i16m2_i16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { - return __riscv_vlmul_trunc_v_i16m4_i16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { - return __riscv_vlmul_trunc_v_i16m4_i16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { - return __riscv_vlmul_trunc_v_i16m4_i16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { - return __riscv_vlmul_trunc_v_i16m4_i16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { - return __riscv_vlmul_trunc_v_i16m8_i16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { - return __riscv_vlmul_trunc_v_i16m8_i16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { - return __riscv_vlmul_trunc_v_i16m8_i16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { - return __riscv_vlmul_trunc_v_i16m8_i16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { - return __riscv_vlmul_trunc_v_i16m8_i16m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { - return __riscv_vlmul_trunc_v_i32m1_i32mf2(op1); -} - 
-// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { - return __riscv_vlmul_trunc_v_i32m2_i32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { - return __riscv_vlmul_trunc_v_i32m2_i32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { - return __riscv_vlmul_trunc_v_i32m4_i32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { - return __riscv_vlmul_trunc_v_i32m4_i32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { - return __riscv_vlmul_trunc_v_i32m4_i32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { - return __riscv_vlmul_trunc_v_i32m8_i32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { - return __riscv_vlmul_trunc_v_i32m8_i32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { - return __riscv_vlmul_trunc_v_i32m8_i32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { - return __riscv_vlmul_trunc_v_i32m8_i32m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { - return __riscv_vlmul_trunc_v_i64m2_i64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) -// 
CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { - return __riscv_vlmul_trunc_v_i64m4_i64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { - return __riscv_vlmul_trunc_v_i64m4_i64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { - return __riscv_vlmul_trunc_v_i64m8_i64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { - return __riscv_vlmul_trunc_v_i64m8_i64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { - return __riscv_vlmul_trunc_v_i64m8_i64m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { - return __riscv_vlmul_trunc_v_u8mf4_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { - return __riscv_vlmul_trunc_v_u8mf2_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { - return __riscv_vlmul_trunc_v_u8mf2_u8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { - return __riscv_vlmul_trunc_v_u8m1_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { - return __riscv_vlmul_trunc_v_u8m1_u8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { - return __riscv_vlmul_trunc_v_u8m1_u8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { - return __riscv_vlmul_trunc_v_u8m2_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { - return __riscv_vlmul_trunc_v_u8m2_u8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { - return __riscv_vlmul_trunc_v_u8m2_u8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { - return __riscv_vlmul_trunc_v_u8m2_u8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { - return __riscv_vlmul_trunc_v_u8m4_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { - return __riscv_vlmul_trunc_v_u8m4_u8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { - return __riscv_vlmul_trunc_v_u8m4_u8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { - return __riscv_vlmul_trunc_v_u8m4_u8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { - return __riscv_vlmul_trunc_v_u8m4_u8m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { - return __riscv_vlmul_trunc_v_u8m8_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { - return 
__riscv_vlmul_trunc_v_u8m8_u8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { - return __riscv_vlmul_trunc_v_u8m8_u8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { - return __riscv_vlmul_trunc_v_u8m8_u8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { - return __riscv_vlmul_trunc_v_u8m8_u8m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { - return __riscv_vlmul_trunc_v_u8m8_u8m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { - return __riscv_vlmul_trunc_v_u16mf2_u16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { - return __riscv_vlmul_trunc_v_u16m1_u16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { - return __riscv_vlmul_trunc_v_u16m1_u16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { - return __riscv_vlmul_trunc_v_u16m2_u16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { - return __riscv_vlmul_trunc_v_u16m2_u16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { - return __riscv_vlmul_trunc_v_u16m2_u16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { - return __riscv_vlmul_trunc_v_u16m4_u16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { - return __riscv_vlmul_trunc_v_u16m4_u16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { - return __riscv_vlmul_trunc_v_u16m4_u16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { - return __riscv_vlmul_trunc_v_u16m4_u16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { - return __riscv_vlmul_trunc_v_u16m8_u16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { - return __riscv_vlmul_trunc_v_u16m8_u16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { - return __riscv_vlmul_trunc_v_u16m8_u16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { - return __riscv_vlmul_trunc_v_u16m8_u16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { - return __riscv_vlmul_trunc_v_u16m8_u16m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { - return __riscv_vlmul_trunc_v_u32m1_u32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t 
op1) { - return __riscv_vlmul_trunc_v_u32m2_u32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { - return __riscv_vlmul_trunc_v_u32m2_u32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { - return __riscv_vlmul_trunc_v_u32m4_u32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { - return __riscv_vlmul_trunc_v_u32m4_u32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { - return __riscv_vlmul_trunc_v_u32m4_u32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { - return __riscv_vlmul_trunc_v_u32m8_u32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { - return __riscv_vlmul_trunc_v_u32m8_u32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { - return __riscv_vlmul_trunc_v_u32m8_u32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { - return __riscv_vlmul_trunc_v_u32m8_u32m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { - return __riscv_vlmul_trunc_v_u64m2_u64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { - return __riscv_vlmul_trunc_v_u64m4_u64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { - return __riscv_vlmul_trunc_v_u64m4_u64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { - return __riscv_vlmul_trunc_v_u64m8_u64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { - return __riscv_vlmul_trunc_v_u64m8_u64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { - return __riscv_vlmul_trunc_v_u64m8_u64m4(op1); -} - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul_trunc_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul_trunc_v.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlmul_trunc_v.c @@ -0,0 +1,1224 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv2f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) { + return __riscv_vlmul_trunc_v_f16mf2_f16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv4f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) { + return __riscv_vlmul_trunc_v_f16m1_f16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv4f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) { + return __riscv_vlmul_trunc_v_f16m1_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) { + return __riscv_vlmul_trunc_v_f16m2_f16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv8f16( [[OP1:%.*]], i64 0) +//
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) { + return __riscv_vlmul_trunc_v_f16m2_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv8f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) { + return __riscv_vlmul_trunc_v_f16m2_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) { + return __riscv_vlmul_trunc_v_f16m4_f16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) { + return __riscv_vlmul_trunc_v_f16m4_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) { + return __riscv_vlmul_trunc_v_f16m4_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) { + return __riscv_vlmul_trunc_v_f16m4_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) { + return __riscv_vlmul_trunc_v_f16m8_f16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) { + return __riscv_vlmul_trunc_v_f16m8_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) { + return __riscv_vlmul_trunc_v_f16m8_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) { + return __riscv_vlmul_trunc_v_f16m8_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) { + return 
__riscv_vlmul_trunc_v_f16m8_f16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { + return __riscv_vlmul_trunc_v_f32m1_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { + return __riscv_vlmul_trunc_v_f32m2_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { + return __riscv_vlmul_trunc_v_f32m2_f32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { + return __riscv_vlmul_trunc_v_f32m4_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { + return __riscv_vlmul_trunc_v_f32m4_f32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { + return __riscv_vlmul_trunc_v_f32m4_f32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { + return __riscv_vlmul_trunc_v_f32m8_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { + return __riscv_vlmul_trunc_v_f32m8_f32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { + return __riscv_vlmul_trunc_v_f32m8_f32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { + return __riscv_vlmul_trunc_v_f32m8_f32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { + return __riscv_vlmul_trunc_v_f64m2_f64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { + return __riscv_vlmul_trunc_v_f64m4_f64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { + return __riscv_vlmul_trunc_v_f64m4_f64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { + return __riscv_vlmul_trunc_v_f64m8_f64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { + return __riscv_vlmul_trunc_v_f64m8_f64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { + return __riscv_vlmul_trunc_v_f64m8_f64m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { + return __riscv_vlmul_trunc_v_i8mf4_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { + return __riscv_vlmul_trunc_v_i8mf2_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { + return __riscv_vlmul_trunc_v_i8mf2_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { + return __riscv_vlmul_trunc_v_i8m1_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { + 
return __riscv_vlmul_trunc_v_i8m1_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { + return __riscv_vlmul_trunc_v_i8m1_i8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { + return __riscv_vlmul_trunc_v_i8m2_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { + return __riscv_vlmul_trunc_v_i8m2_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { + return __riscv_vlmul_trunc_v_i8m2_i8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { + return __riscv_vlmul_trunc_v_i8m2_i8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { + return __riscv_vlmul_trunc_v_i8m4_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { + return __riscv_vlmul_trunc_v_i8m4_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { + return __riscv_vlmul_trunc_v_i8m4_i8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { + return __riscv_vlmul_trunc_v_i8m4_i8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { + return __riscv_vlmul_trunc_v_i8m4_i8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { + return __riscv_vlmul_trunc_v_i8m8_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { + return __riscv_vlmul_trunc_v_i8m8_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { + return __riscv_vlmul_trunc_v_i8m8_i8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { + return __riscv_vlmul_trunc_v_i8m8_i8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { + return __riscv_vlmul_trunc_v_i8m8_i8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { + return __riscv_vlmul_trunc_v_i8m8_i8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { + return __riscv_vlmul_trunc_v_i16mf2_i16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { + return __riscv_vlmul_trunc_v_i16m1_i16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { + return __riscv_vlmul_trunc_v_i16m1_i16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { + return __riscv_vlmul_trunc_v_i16m2_i16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { + return __riscv_vlmul_trunc_v_i16m2_i16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { + return __riscv_vlmul_trunc_v_i16m2_i16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { + return __riscv_vlmul_trunc_v_i16m4_i16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { + return __riscv_vlmul_trunc_v_i16m4_i16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { + return __riscv_vlmul_trunc_v_i16m4_i16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { + return __riscv_vlmul_trunc_v_i16m4_i16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { + return __riscv_vlmul_trunc_v_i16m8_i16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { + return __riscv_vlmul_trunc_v_i16m8_i16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { + return __riscv_vlmul_trunc_v_i16m8_i16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { + return __riscv_vlmul_trunc_v_i16m8_i16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { + return __riscv_vlmul_trunc_v_i16m8_i16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t 
test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { + return __riscv_vlmul_trunc_v_i32m1_i32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { + return __riscv_vlmul_trunc_v_i32m2_i32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { + return __riscv_vlmul_trunc_v_i32m2_i32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { + return __riscv_vlmul_trunc_v_i32m4_i32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { + return __riscv_vlmul_trunc_v_i32m4_i32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { + return __riscv_vlmul_trunc_v_i32m4_i32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { + return __riscv_vlmul_trunc_v_i32m8_i32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { + return __riscv_vlmul_trunc_v_i32m8_i32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { + return __riscv_vlmul_trunc_v_i32m8_i32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { + return __riscv_vlmul_trunc_v_i32m8_i32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { + return __riscv_vlmul_trunc_v_i64m2_i64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { + return __riscv_vlmul_trunc_v_i64m4_i64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { + return __riscv_vlmul_trunc_v_i64m4_i64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { + return __riscv_vlmul_trunc_v_i64m8_i64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { + return __riscv_vlmul_trunc_v_i64m8_i64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { + return __riscv_vlmul_trunc_v_i64m8_i64m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { + return __riscv_vlmul_trunc_v_u8mf4_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { + return __riscv_vlmul_trunc_v_u8mf2_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { + return __riscv_vlmul_trunc_v_u8mf2_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { + return __riscv_vlmul_trunc_v_u8m1_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { + return __riscv_vlmul_trunc_v_u8m1_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { + return 
__riscv_vlmul_trunc_v_u8m1_u8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { + return __riscv_vlmul_trunc_v_u8m2_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { + return __riscv_vlmul_trunc_v_u8m2_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { + return __riscv_vlmul_trunc_v_u8m2_u8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { + return __riscv_vlmul_trunc_v_u8m2_u8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { + return __riscv_vlmul_trunc_v_u8m4_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { + return __riscv_vlmul_trunc_v_u8m4_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { + return __riscv_vlmul_trunc_v_u8m4_u8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { + return __riscv_vlmul_trunc_v_u8m4_u8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { + return __riscv_vlmul_trunc_v_u8m4_u8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { + return __riscv_vlmul_trunc_v_u8m8_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { + return __riscv_vlmul_trunc_v_u8m8_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { + return __riscv_vlmul_trunc_v_u8m8_u8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { + return __riscv_vlmul_trunc_v_u8m8_u8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { + return __riscv_vlmul_trunc_v_u8m8_u8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { + return __riscv_vlmul_trunc_v_u8m8_u8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { + return __riscv_vlmul_trunc_v_u16mf2_u16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { + return __riscv_vlmul_trunc_v_u16m1_u16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { + return __riscv_vlmul_trunc_v_u16m1_u16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { + return __riscv_vlmul_trunc_v_u16m2_u16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { + return __riscv_vlmul_trunc_v_u16m2_u16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { + return __riscv_vlmul_trunc_v_u16m2_u16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { + return __riscv_vlmul_trunc_v_u16m4_u16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { + return __riscv_vlmul_trunc_v_u16m4_u16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { + return __riscv_vlmul_trunc_v_u16m4_u16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { + return __riscv_vlmul_trunc_v_u16m4_u16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { + return __riscv_vlmul_trunc_v_u16m8_u16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { + return __riscv_vlmul_trunc_v_u16m8_u16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { + return __riscv_vlmul_trunc_v_u16m8_u16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { + return __riscv_vlmul_trunc_v_u16m8_u16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { + return __riscv_vlmul_trunc_v_u16m8_u16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { + return __riscv_vlmul_trunc_v_u32m1_u32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { + return __riscv_vlmul_trunc_v_u32m2_u32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { + return __riscv_vlmul_trunc_v_u32m2_u32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { + return __riscv_vlmul_trunc_v_u32m4_u32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { + return __riscv_vlmul_trunc_v_u32m4_u32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { + return __riscv_vlmul_trunc_v_u32m4_u32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { + return __riscv_vlmul_trunc_v_u32m8_u32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { + return __riscv_vlmul_trunc_v_u32m8_u32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { + return __riscv_vlmul_trunc_v_u32m8_u32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { + return __riscv_vlmul_trunc_v_u32m8_u32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { + return __riscv_vlmul_trunc_v_u64m2_u64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { + return __riscv_vlmul_trunc_v_u64m4_u64m1(op1); +} + +// CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_u64m4_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { + return __riscv_vlmul_trunc_v_u64m4_u64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { + return __riscv_vlmul_trunc_v_u64m8_u64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { + return __riscv_vlmul_trunc_v_u64m8_u64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { + return __riscv_vlmul_trunc_v_u64m8_u64m4(op1); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext_vf2.c rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext_vf2.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext_vf2.c @@ -60,87 +60,6 @@ return __riscv_vsext_vf2_i16m8(op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_i32mf2(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m1(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m2(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] 
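// Editor's note: the vlmul_trunc_v.c checks above verify that every
// __riscv_vlmul_trunc_v_* intrinsic lowers to a single llvm.vector.extract at
// index 0, i.e. it reinterprets the low portion of a wider-LMUL register group
// as a narrower-LMUL value without moving any data. A minimal usage sketch
// follows; the helper name add_low_m1 is hypothetical and not part of the
// test suite.
#include <riscv_vector.h>

vint32m1_t add_low_m1(vint32m8_t acc, vint32m1_t x, size_t vl) {
  // Take the low LMUL=1 slice of the LMUL=8 group (llvm.vector.extract, i64 0)...
  vint32m1_t lo = __riscv_vlmul_trunc_v_i32m8_i32m1(acc);
  // ...and combine it with x using the ordinary single-width add intrinsic.
  return __riscv_vadd_vv_i32m1(lo, x, vl);
}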
-// -vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m8(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m1(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m2(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m8(op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) @@ -186,42 +105,6 @@ return __riscv_vsext_vf2_i32m8(op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m1(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m2(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m8(op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) @@ -312,87 +195,6 @@ return __riscv_vsext_vf2_i16m8_m(mask, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// 
CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_i32mf2_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m1_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m2_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m4_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m8_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m1_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m2_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m4_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m8_m(mask, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -438,42 +240,6 @@ return 
__riscv_vsext_vf2_i32m8_m(mask, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m1_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m2_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m4_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m8_m(mask, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext_vf4.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext_vf4.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext_vf4.c @@ -0,0 +1,170 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf4_i32mf2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m1(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4( +//
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m8(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m1(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m8(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf4_i32mf2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m1_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m4_m(mask, op1, vl); +} + +// 
CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m8_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m1_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m4_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m8_m(mask, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext_vf8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext_vf8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vsext_vf8.c @@ -0,0 +1,80 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m1(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t
test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m8(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m1_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m4_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m8_m(mask, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext_vf2.c rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext_vf2.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext_vf2.c @@ -60,87 +60,6 @@ return __riscv_vzext_vf2_u16m8(op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_u32mf2(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m1(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] 
-// -vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m2(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m8(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m1(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m2(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m8(op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) @@ -186,42 +105,6 @@ return __riscv_vzext_vf2_u32m8(op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m1(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m2(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vzext.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m8(op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) @@ -312,87 +195,6 @@ return __riscv_vzext_vf2_u16m8_m(mask, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_u32mf2_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m1_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m2_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m4_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m8_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m1_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m2_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, 
vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m4_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m8_m(mask, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -438,42 +240,6 @@ return __riscv_vzext_vf2_u32m8_m(mask, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m1_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m2_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m4_m(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m8_m(mask, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext_vf4.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext_vf4.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext_vf4.c @@ -0,0 +1,170 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf4_u32mf2(op1,
vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m1(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m8(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m1(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m8(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf4_u32mf2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m1_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: 
@test_vzext_vf4_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m4_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m8_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m1_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m4_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m8_m(mask, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext_vf8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext_vf8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vzext_vf8.c @@ -0,0 +1,80 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( poison,
[[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m1(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m2(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m4(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m8(op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m1_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m2_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m4_m(mask, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m8_m(mask, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt.c @@ -16,15 +16,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // 
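// ---- Editorial sketch for the overloaded vfcvt hunks above and below, not
// part of the patch: the __riscv_vfcvt_x spelling kept by this file resolves
// on the argument type and rounds according to the dynamic frm CSR, while the
// __riscv_vfcvt_rtz_x calls being deleted always truncate toward zero. The
// wrapper name is an illustrative assumption; the intrinsic call is the one
// these tests use.
#include <riscv_vector.h>
#include <stddef.h>

vint32m1_t to_i32_dynamic_rounding(vfloat32m1_t v, size_t vl) {
  return __riscv_vfcvt_x(v, vl);  // frm-based, unlike the removed rtz variant
}
// ---- End of editorial sketch.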
CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -34,15 +25,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -52,15 +34,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -70,15 +43,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -88,15 +52,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -106,15 +61,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -124,15 +70,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// 
-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -142,15 +79,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -160,15 +88,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -178,15 +97,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -196,15 +106,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -214,15 +115,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -340,15 +232,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -358,15 +241,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -376,15 +250,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -394,15 +259,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -412,15 +268,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -430,15 +277,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -448,15 +286,6 
@@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -466,15 +295,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -484,15 +304,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -502,15 +313,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -610,15 +412,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -628,15 +421,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -646,15 +430,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -664,15 +439,6 @@ return __riscv_vfcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -682,15 +448,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -700,15 +457,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -718,15 +466,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -736,15 +475,6 @@ return __riscv_vfcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, 
size_t vl) { - return __riscv_vfcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -826,15 +556,6 @@ return __riscv_vfcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -844,15 +565,6 @@ return __riscv_vfcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -862,15 +574,6 @@ return __riscv_vfcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -880,15 +583,6 @@ return __riscv_vfcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -898,15 +592,6 @@ return __riscv_vfcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: 
@test_vfcvt_x_f_v_i16m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -916,15 +601,6 @@ return __riscv_vfcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -934,15 +610,6 @@ return __riscv_vfcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -952,15 +619,6 @@ return __riscv_vfcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -970,15 +628,6 @@ return __riscv_vfcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -988,15 +637,6 @@ return __riscv_vfcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_m( // 
CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1006,15 +646,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1024,15 +655,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1150,15 +772,6 @@
return __riscv_vfcvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return __riscv_vfcvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1168,15 +781,6 @@
return __riscv_vfcvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return __riscv_vfcvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1186,15 +790,6 @@
return __riscv_vfcvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return __riscv_vfcvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1204,15 +799,6 @@
return __riscv_vfcvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return __riscv_vfcvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1222,15 +808,6 @@
return __riscv_vfcvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return __riscv_vfcvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1240,15 +817,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1258,15 +826,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1276,15 +835,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1294,15 +844,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1312,15 +853,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1420,15 +952,6 @@
return __riscv_vfcvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return __riscv_vfcvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1438,15 +961,6 @@
return __riscv_vfcvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return __riscv_vfcvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1456,15 +970,6 @@
return __riscv_vfcvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return __riscv_vfcvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1474,15 +979,6 @@
return __riscv_vfcvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return __riscv_vfcvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1492,15 +988,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1510,15 +997,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1528,15 +1006,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1546,15 +1015,6 @@
return __riscv_vfcvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
- return __riscv_vfcvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt_rtz.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfcvt_rtz.c
@@ -0,0 +1,549 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4(vfloat16mf4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2(vfloat16mf2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1(vfloat16m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2(vfloat16m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4(vfloat16m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8(vfloat16m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4(vfloat16mf4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2(vfloat16mf2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1(vfloat16m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2(vfloat16m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4(vfloat16m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8(vfloat16m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_x(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) {
+ return __riscv_vfcvt_rtz_xu(mask, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt.c
@@ -16,15 +16,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -34,15 +25,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -52,15 +34,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -70,15 +43,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -88,15 +52,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -106,15 +61,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -124,15 +70,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -142,15 +79,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -160,15 +88,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -178,15 +97,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -196,15 +106,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -214,15 +115,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -232,15 +124,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -250,15 +133,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -268,15 +142,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -286,15 +151,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -304,15 +160,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -322,15 +169,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -340,15 +178,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -358,15 +187,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -376,15 +196,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -394,15 +205,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -502,15 +304,6 @@
return __riscv_vfncvt_f(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) {
- return __riscv_vfncvt_rod_f(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -520,15 +313,6 @@
return __riscv_vfncvt_f(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) {
- return __riscv_vfncvt_rod_f(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -538,15 +322,6 @@
return __riscv_vfncvt_f(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) {
- return __riscv_vfncvt_rod_f(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -556,15 +331,6 @@
return __riscv_vfncvt_f(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) {
- return __riscv_vfncvt_rod_f(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -574,15 +340,6 @@
return __riscv_vfncvt_f(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) {
- return __riscv_vfncvt_rod_f(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -592,15 +349,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -610,15 +358,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -628,15 +367,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -646,15 +376,6 @@
return __riscv_vfncvt_x(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -664,15 +385,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -682,15 +394,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -700,15 +403,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -718,15 +412,6 @@
return __riscv_vfncvt_xu(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -808,15 +493,6 @@
return __riscv_vfncvt_f(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rod_f(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -826,15 +502,6 @@
return __riscv_vfncvt_f(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rod_f(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -844,15 +511,6 @@
return __riscv_vfncvt_f(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rod_f(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
@@ -862,15 +520,6 @@
return __riscv_vfncvt_f(src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rod_f(src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -880,15 +529,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -898,15 +538,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -916,15 +547,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -934,15 +556,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -952,15 +565,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -970,15 +574,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -988,15 +583,6 @@
return __riscv_vfncvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1006,15 +592,6 @@
return __riscv_vfncvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1024,15 +601,6 @@
return __riscv_vfncvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1042,15 +610,6 @@
return __riscv_vfncvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1060,15 +619,6 @@
return __riscv_vfncvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1078,15 +628,6 @@
return __riscv_vfncvt_xu(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1096,15 +637,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1114,15 +646,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1132,15 +655,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1150,15 +664,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask, src, vl);
-}
-
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
@@ -1168,15 +673,6 @@
return __riscv_vfncvt_x(mask, src, vl);
}

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_x(mask,
src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1186,15 +682,6 @@ return __riscv_vfncvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1204,15 +691,6 @@ return __riscv_vfncvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1222,15 +700,6 @@ return __riscv_vfncvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1240,15 +709,6 @@ return __riscv_vfncvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1258,15 +718,6 @@ return __riscv_vfncvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return 
__riscv_vfncvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1366,15 +817,6 @@ return __riscv_vfncvt_f(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1384,15 +826,6 @@ return __riscv_vfncvt_f(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1402,15 +835,6 @@ return __riscv_vfncvt_f(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1420,15 +844,6 @@ return __riscv_vfncvt_f(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1438,15 +853,6 @@ return __riscv_vfncvt_f(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return 
__riscv_vfncvt_rod_f(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1456,15 +862,6 @@ return __riscv_vfncvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1474,15 +871,6 @@ return __riscv_vfncvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1492,15 +880,6 @@ return __riscv_vfncvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1510,15 +889,6 @@ return __riscv_vfncvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1528,15 +898,6 @@ return __riscv_vfncvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return 
__riscv_vfncvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1546,15 +907,6 @@ return __riscv_vfncvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1564,15 +916,6 @@ return __riscv_vfncvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1582,15 +925,6 @@ return __riscv_vfncvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1672,15 +1006,6 @@ return __riscv_vfncvt_f(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1690,15 +1015,6 @@ return __riscv_vfncvt_f(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return 
__riscv_vfncvt_rod_f(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1708,15 +1024,6 @@ return __riscv_vfncvt_f(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1726,12 +1033,3 @@ return __riscv_vfncvt_f(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f(mask, src, vl); -} - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rod.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rod.c @@ -0,0 +1,171 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4(vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2(vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1(vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t
test_vfncvt_rod_f_f_w_f16m2(vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4(vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rod_f(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + 
return __riscv_vfncvt_rod_f(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f(mask, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rtz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfncvt_rtz.c @@ -0,0 +1,549 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8(vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4(vfloat16mf2_t src, size_t vl) { + return
__riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2(vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1(vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2(vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4(vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8(vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4(vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2(vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1(vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2(vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4(vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return 
__riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu(mask, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt.c @@ -124,15 +124,6 @@ return __riscv_vfwcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -142,15 +133,6 @@ return __riscv_vfwcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -160,15 +142,6 @@ return __riscv_vfwcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4( // CHECK-RV64-NEXT: entry: 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -178,15 +151,6 @@ return __riscv_vfwcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -196,15 +160,6 @@ return __riscv_vfwcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -214,15 +169,6 @@ return __riscv_vfwcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -232,15 +178,6 @@ return __riscv_vfwcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -250,15 +187,6 @@ return __riscv_vfwcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -268,15 +196,6 @@ return __riscv_vfwcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// 
-vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -286,15 +205,6 @@ return __riscv_vfwcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -439,15 +349,6 @@ return __riscv_vfwcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -457,15 +358,6 @@ return __riscv_vfwcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -475,15 +367,6 @@ return __riscv_vfwcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -493,15 +376,6 @@ return __riscv_vfwcvt_x(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -511,15 +385,6 @@ return __riscv_vfwcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -529,15 +394,6 @@ return __riscv_vfwcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -547,15 +403,6 @@ return __riscv_vfwcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -565,15 +412,6 @@ return __riscv_vfwcvt_xu(src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) @@ -799,15 +637,6 @@ return __riscv_vfwcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -817,15 +646,6 @@ return __riscv_vfwcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); -} - // 
CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -835,15 +655,6 @@ return __riscv_vfwcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -853,15 +664,6 @@ return __riscv_vfwcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -871,15 +673,6 @@ return __riscv_vfwcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -889,15 +682,6 @@ return __riscv_vfwcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -907,15 +691,6 @@ return __riscv_vfwcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: 
@test_vfwcvt_xu_f_v_u32m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -925,15 +700,6 @@ return __riscv_vfwcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -943,15 +709,6 @@ return __riscv_vfwcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -961,15 +718,6 @@ return __riscv_vfwcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1114,15 +862,6 @@ return __riscv_vfwcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1132,15 +871,6 @@ return __riscv_vfwcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: 
@test_vfwcvt_x_f_v_i64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1150,15 +880,6 @@ return __riscv_vfwcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1168,15 +889,6 @@ return __riscv_vfwcvt_x(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1186,15 +898,6 @@ return __riscv_vfwcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1204,15 +907,6 @@ return __riscv_vfwcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1222,15 +916,6 @@ return __riscv_vfwcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: 
@test_vfwcvt_xu_f_v_u64m8_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -1240,15 +925,6 @@ return __riscv_vfwcvt_xu(mask, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu(mask, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt_rtz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwcvt_rtz.c @@ -0,0 +1,333 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2(vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1(vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2(vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4(vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8(vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2(vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1(vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2(vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4(vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8(vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(src, vl); +} 
+ +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vfloat16mf4_t src, size_t vl) { + 
return __riscv_vfwcvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(mask, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu(mask, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul_ext_v.c rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul_ext_v.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul_ext_v.c @@ -1222,1218 +1222,3 @@ return __riscv_vlmul_ext_u64m8(op1); } -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv2f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) { - return __riscv_vlmul_trunc_f16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv4f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) { - return __riscv_vlmul_trunc_f16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv4f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) { - return __riscv_vlmul_trunc_f16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv8f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_f16mf4(op1); -} - -// CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_f16m2_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv8f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_f16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv8f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) { - return __riscv_vlmul_trunc_f16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv16f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_f16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv16f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_f16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv16f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_f16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv16f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) { - return __riscv_vlmul_trunc_f16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv32f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_f16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv32f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_f16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv32f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_f16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv32f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_f16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16f16.nxv32f16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t 
test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) { - return __riscv_vlmul_trunc_f16m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { - return __riscv_vlmul_trunc_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { - return __riscv_vlmul_trunc_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { - return __riscv_vlmul_trunc_f32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_f32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { - return __riscv_vlmul_trunc_f32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_f32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_f32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_f32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { - return __riscv_vlmul_trunc_f32m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { - return __riscv_vlmul_trunc_f64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { - return __riscv_vlmul_trunc_f64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { - return __riscv_vlmul_trunc_f64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_f64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_f64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { - return __riscv_vlmul_trunc_f64m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { - return __riscv_vlmul_trunc_i8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { - return __riscv_vlmul_trunc_i8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { - return __riscv_vlmul_trunc_i8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { - return __riscv_vlmul_trunc_i8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { - return __riscv_vlmul_trunc_i8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { - return __riscv_vlmul_trunc_i8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { - return __riscv_vlmul_trunc_i8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { - return __riscv_vlmul_trunc_i8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { - return __riscv_vlmul_trunc_i8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { - return __riscv_vlmul_trunc_i8m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { - return __riscv_vlmul_trunc_i8m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { - return __riscv_vlmul_trunc_i16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { - return __riscv_vlmul_trunc_i16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { - return __riscv_vlmul_trunc_i16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { - return __riscv_vlmul_trunc_i16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { - return __riscv_vlmul_trunc_i16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { - return __riscv_vlmul_trunc_i16m1(op1); -} - -// CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_i16m4_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { - return __riscv_vlmul_trunc_i16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { - return __riscv_vlmul_trunc_i16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { - return __riscv_vlmul_trunc_i16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { - return __riscv_vlmul_trunc_i16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { - return __riscv_vlmul_trunc_i16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { - return __riscv_vlmul_trunc_i16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { - return __riscv_vlmul_trunc_i16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { - return __riscv_vlmul_trunc_i16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { - return __riscv_vlmul_trunc_i16m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { - return __riscv_vlmul_trunc_i32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t 
op1) { - return __riscv_vlmul_trunc_i32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { - return __riscv_vlmul_trunc_i32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { - return __riscv_vlmul_trunc_i32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) { - return __riscv_vlmul_trunc_i32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { - return __riscv_vlmul_trunc_i32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { - return __riscv_vlmul_trunc_i32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { - return __riscv_vlmul_trunc_i32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { - return __riscv_vlmul_trunc_i32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { - return __riscv_vlmul_trunc_i32m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { - return __riscv_vlmul_trunc_i64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { - return __riscv_vlmul_trunc_i64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] 
-// -vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { - return __riscv_vlmul_trunc_i64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { - return __riscv_vlmul_trunc_i64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { - return __riscv_vlmul_trunc_i64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { - return __riscv_vlmul_trunc_i64m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { - return __riscv_vlmul_trunc_u8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { - return __riscv_vlmul_trunc_u8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { - return __riscv_vlmul_trunc_u8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) -// 
CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { - return __riscv_vlmul_trunc_u8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { - return __riscv_vlmul_trunc_u8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { - return __riscv_vlmul_trunc_u8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { - return __riscv_vlmul_trunc_u8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { - return __riscv_vlmul_trunc_u8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { - return __riscv_vlmul_trunc_u8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { - return __riscv_vlmul_trunc_u8m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8mf8(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], 
i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { - return __riscv_vlmul_trunc_u8m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { - return __riscv_vlmul_trunc_u16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { - return __riscv_vlmul_trunc_u16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { - return __riscv_vlmul_trunc_u16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { - return __riscv_vlmul_trunc_u16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { - return __riscv_vlmul_trunc_u16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { - return __riscv_vlmul_trunc_u16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { - return __riscv_vlmul_trunc_u16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { - return __riscv_vlmul_trunc_u16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { - return __riscv_vlmul_trunc_u16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { - return __riscv_vlmul_trunc_u16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { - return __riscv_vlmul_trunc_u16mf4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { - return __riscv_vlmul_trunc_u16mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { - return __riscv_vlmul_trunc_u16m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { - return __riscv_vlmul_trunc_u16m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { - return __riscv_vlmul_trunc_u16m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { - return __riscv_vlmul_trunc_u32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { - return __riscv_vlmul_trunc_u32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { - return __riscv_vlmul_trunc_u32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { - return __riscv_vlmul_trunc_u32mf2(op1); 
-} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { - return __riscv_vlmul_trunc_u32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { - return __riscv_vlmul_trunc_u32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { - return __riscv_vlmul_trunc_u32mf2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { - return __riscv_vlmul_trunc_u32m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { - return __riscv_vlmul_trunc_u32m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { - return __riscv_vlmul_trunc_u32m4(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { - return __riscv_vlmul_trunc_u64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { - return __riscv_vlmul_trunc_u64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { - return __riscv_vlmul_trunc_u64m2(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { - return __riscv_vlmul_trunc_u64m1(op1); -} - -// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t 
test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) {
-  return __riscv_vlmul_trunc_u64m2(op1);
-}
-
-// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) {
-  return __riscv_vlmul_trunc_u64m4(op1);
-}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul_trunc_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul_trunc_v.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vlmul_trunc_v.c
@@ -0,0 +1,1224 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16mf2_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.vector.extract.nxv1f16.nxv2f16(<vscale x 2 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vlmul_trunc_v_f16mf2_f16mf4(vfloat16mf2_t op1) {
+  return __riscv_vlmul_trunc_f16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.vector.extract.nxv1f16.nxv4f16(<vscale x 4 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vlmul_trunc_v_f16m1_f16mf4(vfloat16m1_t op1) {
+  return __riscv_vlmul_trunc_f16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m1_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv4f16(<vscale x 4 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vlmul_trunc_v_f16m1_f16mf2(vfloat16m1_t op1) {
+  return __riscv_vlmul_trunc_f16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.vector.extract.nxv1f16.nxv8f16(<vscale x 8 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vlmul_trunc_v_f16m2_f16mf4(vfloat16m2_t op1) {
+  return __riscv_vlmul_trunc_f16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv8f16(<vscale x 8 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vlmul_trunc_v_f16m2_f16mf2(vfloat16m2_t op1) {
+  return __riscv_vlmul_trunc_f16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m2_f16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.vector.extract.nxv4f16.nxv8f16(<vscale x 8 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vlmul_trunc_v_f16m2_f16m1(vfloat16m2_t op1) {
+  return __riscv_vlmul_trunc_f16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.vector.extract.nxv1f16.nxv16f16(<vscale x 16 x half> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vlmul_trunc_v_f16m4_f16mf4(vfloat16m4_t op1) {
+  return __riscv_vlmul_trunc_f16mf4(op1);
+}
+
+// CHECK-RV64-LABEL:
@test_vlmul_trunc_v_f16m4_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_trunc_v_f16m4_f16mf2(vfloat16m4_t op1) { + return __riscv_vlmul_trunc_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_trunc_v_f16m4_f16m1(vfloat16m4_t op1) { + return __riscv_vlmul_trunc_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m4_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv16f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_trunc_v_f16m4_f16m2(vfloat16m4_t op1) { + return __riscv_vlmul_trunc_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vlmul_trunc_v_f16m8_f16mf4(vfloat16m8_t op1) { + return __riscv_vlmul_trunc_f16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vlmul_trunc_v_f16m8_f16mf2(vfloat16m8_t op1) { + return __riscv_vlmul_trunc_f16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vlmul_trunc_v_f16m8_f16m1(vfloat16m8_t op1) { + return __riscv_vlmul_trunc_f16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vlmul_trunc_v_f16m8_f16m2(vfloat16m8_t op1) { + return __riscv_vlmul_trunc_f16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f16m8_f16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16f16.nxv32f16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vlmul_trunc_v_f16m8_f16m4(vfloat16m8_t op1) { + return __riscv_vlmul_trunc_f16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv2f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) { + return __riscv_vlmul_trunc_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) { + return __riscv_vlmul_trunc_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv4f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) { + return __riscv_vlmul_trunc_f32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) { + return __riscv_vlmul_trunc_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) { + return __riscv_vlmul_trunc_f32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv8f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) { + return __riscv_vlmul_trunc_f32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) { + return __riscv_vlmul_trunc_f32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) { + return __riscv_vlmul_trunc_f32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) { + return __riscv_vlmul_trunc_f32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8f32.nxv16f32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) { + return __riscv_vlmul_trunc_f32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv2f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) { + return __riscv_vlmul_trunc_f64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) { + return __riscv_vlmul_trunc_f64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv4f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) { + return __riscv_vlmul_trunc_f64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.vector.extract.nxv1f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) { + return __riscv_vlmul_trunc_f64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) { + return __riscv_vlmul_trunc_f64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4f64.nxv8f64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) { + return __riscv_vlmul_trunc_f64m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) { + return __riscv_vlmul_trunc_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) { + return __riscv_vlmul_trunc_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) { + return __riscv_vlmul_trunc_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) { + return __riscv_vlmul_trunc_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) { + return __riscv_vlmul_trunc_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) { + return __riscv_vlmul_trunc_i8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) { + return __riscv_vlmul_trunc_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) { + return __riscv_vlmul_trunc_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) { + return __riscv_vlmul_trunc_i8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) { + return __riscv_vlmul_trunc_i8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) { + return __riscv_vlmul_trunc_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) { + return __riscv_vlmul_trunc_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) { + return __riscv_vlmul_trunc_i8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) { + return __riscv_vlmul_trunc_i8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) { + return __riscv_vlmul_trunc_i8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) { + return __riscv_vlmul_trunc_i8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) { + return __riscv_vlmul_trunc_i8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) { + return __riscv_vlmul_trunc_i8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) { + return __riscv_vlmul_trunc_i8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) { + return __riscv_vlmul_trunc_i8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) { + return __riscv_vlmul_trunc_i8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) { + return __riscv_vlmul_trunc_i16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) { + return __riscv_vlmul_trunc_i16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) { + return __riscv_vlmul_trunc_i16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) { + return __riscv_vlmul_trunc_i16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) { + return __riscv_vlmul_trunc_i16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) { + return __riscv_vlmul_trunc_i16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) { + return __riscv_vlmul_trunc_i16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) { + return __riscv_vlmul_trunc_i16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) { + return __riscv_vlmul_trunc_i16m1(op1); +} + +// CHECK-RV64-LABEL: 
@test_vlmul_trunc_v_i16m4_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) { + return __riscv_vlmul_trunc_i16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) { + return __riscv_vlmul_trunc_i16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) { + return __riscv_vlmul_trunc_i16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) { + return __riscv_vlmul_trunc_i16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) { + return __riscv_vlmul_trunc_i16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) { + return __riscv_vlmul_trunc_i16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) { + return __riscv_vlmul_trunc_i32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) { + return __riscv_vlmul_trunc_i32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) { + return __riscv_vlmul_trunc_i32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) { + return __riscv_vlmul_trunc_i32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) 
{ + return __riscv_vlmul_trunc_i32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) { + return __riscv_vlmul_trunc_i32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) { + return __riscv_vlmul_trunc_i32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) { + return __riscv_vlmul_trunc_i32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) { + return __riscv_vlmul_trunc_i32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) { + return __riscv_vlmul_trunc_i32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) { + return __riscv_vlmul_trunc_i64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) { + return __riscv_vlmul_trunc_i64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) { + return __riscv_vlmul_trunc_i64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) { + return __riscv_vlmul_trunc_i64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) { + return __riscv_vlmul_trunc_i64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) { + return __riscv_vlmul_trunc_i64m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv2i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) { + return __riscv_vlmul_trunc_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) { + return __riscv_vlmul_trunc_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv4i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) { + return __riscv_vlmul_trunc_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) { + return __riscv_vlmul_trunc_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) { + return __riscv_vlmul_trunc_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv8i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) { + return __riscv_vlmul_trunc_u8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) { + return __riscv_vlmul_trunc_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) { + return __riscv_vlmul_trunc_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) { + return __riscv_vlmul_trunc_u8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv16i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) { + return __riscv_vlmul_trunc_u8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv32i8( [[OP1:%.*]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) { + return __riscv_vlmul_trunc_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) { + return __riscv_vlmul_trunc_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) { + return __riscv_vlmul_trunc_u8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) { + return __riscv_vlmul_trunc_u8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv32i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) { + return __riscv_vlmul_trunc_u8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) { + return __riscv_vlmul_trunc_u8mf8(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) { + return __riscv_vlmul_trunc_u8mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) { + return __riscv_vlmul_trunc_u8mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) { + return __riscv_vlmul_trunc_u8m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) { + return __riscv_vlmul_trunc_u8m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv32i8.nxv64i8( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) { + return __riscv_vlmul_trunc_u8m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv2i16( [[OP1:%.*]], i64 
0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) { + return __riscv_vlmul_trunc_u16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) { + return __riscv_vlmul_trunc_u16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv4i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) { + return __riscv_vlmul_trunc_u16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) { + return __riscv_vlmul_trunc_u16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) { + return __riscv_vlmul_trunc_u16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv8i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) { + return __riscv_vlmul_trunc_u16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) { + return __riscv_vlmul_trunc_u16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) { + return __riscv_vlmul_trunc_u16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) { + return __riscv_vlmul_trunc_u16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv16i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) { + return __riscv_vlmul_trunc_u16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) { + return __riscv_vlmul_trunc_u16mf4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) { + return __riscv_vlmul_trunc_u16mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) { + return __riscv_vlmul_trunc_u16m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) { + return __riscv_vlmul_trunc_u16m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv16i16.nxv32i16( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) { + return __riscv_vlmul_trunc_u16m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv2i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) { + return __riscv_vlmul_trunc_u32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) { + return __riscv_vlmul_trunc_u32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv4i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) { + return __riscv_vlmul_trunc_u32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) { + return __riscv_vlmul_trunc_u32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) { + return __riscv_vlmul_trunc_u32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv8i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) { + return __riscv_vlmul_trunc_u32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) { + return 
__riscv_vlmul_trunc_u32mf2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) { + return __riscv_vlmul_trunc_u32m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) { + return __riscv_vlmul_trunc_u32m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv8i32.nxv16i32( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) { + return __riscv_vlmul_trunc_u32m4(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv2i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) { + return __riscv_vlmul_trunc_u64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) { + return __riscv_vlmul_trunc_u64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv4i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) { + return __riscv_vlmul_trunc_u64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv1i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) { + return __riscv_vlmul_trunc_u64m1(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv2i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) { + return __riscv_vlmul_trunc_u64m2(op1); +} + +// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.vector.extract.nxv4i64.nxv8i64( [[OP1:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) { + return __riscv_vlmul_trunc_u64m4(op1); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext_vf2.c rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext_vf2.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext.c +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext_vf2.c @@ -60,87 +60,6 @@ return __riscv_vsext_vf2(op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8(op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) @@ -186,42 +105,6 @@ return __riscv_vsext_vf2(op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4(op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) @@ -312,87 +195,6 @@ return __riscv_vsext_vf2(mask, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// 
CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8(mask, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -438,42 +240,6 @@ return __riscv_vsext_vf2(mask, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4(mask, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext_vf4.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext_vf4.c new file mode 100644 
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext_vf4.c
@@ -0,0 +1,170 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
+  return __riscv_vsext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
+  return __riscv_vsext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
+  return __riscv_vsext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
+  return __riscv_vsext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
+  return __riscv_vsext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
+  return __riscv_vsext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
+  return __riscv_vsext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
+  return __riscv_vsext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
+  return __riscv_vsext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
+  return __riscv_vsext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
+  return __riscv_vsext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
+  return __riscv_vsext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
+  return __riscv_vsext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint8m2_t op1, size_t vl) {
+  return __riscv_vsext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint16mf4_t op1, size_t vl) {
+  return __riscv_vsext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint16mf2_t op1, size_t vl) {
+  return __riscv_vsext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint16m1_t op1, size_t vl) {
+  return __riscv_vsext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint16m2_t op1, size_t vl) {
+  return __riscv_vsext_vf4(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext_vf8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext_vf8.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vsext_vf8.c
@@ -0,0 +1,80 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
+  return __riscv_vsext_vf8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
+  return __riscv_vsext_vf8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
+  return __riscv_vsext_vf8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
+  return __riscv_vsext_vf8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint8mf8_t op1, size_t vl) {
+  return __riscv_vsext_vf8(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint8mf4_t op1, size_t vl) {
+  return __riscv_vsext_vf8(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint8mf2_t op1, size_t vl) {
+  return __riscv_vsext_vf8(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint8m1_t op1, size_t vl) {
+  return __riscv_vsext_vf8(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext_vf2.c
rename from
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext_vf2.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext_vf2.c @@ -60,87 +60,6 @@ return __riscv_vzext_vf2(op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8(op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vzext.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) @@ -186,42 +105,6 @@ return __riscv_vzext_vf2(op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4(op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( poison, [[OP1:%.*]], i64 [[VL:%.*]]) @@ -312,87 +195,6 @@ return __riscv_vzext_vf2(mask, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t 
test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8(mask, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) @@ -438,42 +240,6 @@ return __riscv_vzext_vf2(mask, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( poison, [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 3) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4(mask, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext_vf4.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext_vf4.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext_vf4.c
@@ -0,0 +1,170 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
+  return __riscv_vzext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
+  return __riscv_vzext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
+  return __riscv_vzext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
+  return __riscv_vzext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
+  return __riscv_vzext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
+  return __riscv_vzext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
+  return __riscv_vzext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
+  return __riscv_vzext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
+  return __riscv_vzext_vf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
+  return __riscv_vzext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
+  return __riscv_vzext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
+  return __riscv_vzext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint8m1_t op1, size_t vl) {
+  return __riscv_vzext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint8m2_t op1, size_t vl) {
+  return __riscv_vzext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint16mf4_t op1, size_t vl) {
+  return __riscv_vzext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint16mf2_t op1, size_t vl) {
+  return __riscv_vzext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint16m1_t op1, size_t vl) {
+  return __riscv_vzext_vf4(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint16m2_t op1, size_t vl) {
+  return __riscv_vzext_vf4(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext_vf8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext_vf8.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vzext_vf8.c
@@ -0,0 +1,80 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
+  return __riscv_vzext_vf8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
+  return __riscv_vzext_vf8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
+  return __riscv_vzext_vf8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
+  return __riscv_vzext_vf8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint8mf8_t op1, size_t vl) {
+  return __riscv_vzext_vf8(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint8mf4_t op1, size_t vl) {
+  return __riscv_vzext_vf8(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint8mf2_t op1, size_t vl) {
+  return __riscv_vzext_vf8(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64>
[[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8(mask, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt.c @@ -16,15 +16,6 @@ return __riscv_vfcvt_x_f_v_i16mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -34,15 +25,6 @@ return __riscv_vfcvt_x_f_v_i16mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -52,15 +34,6 @@ return __riscv_vfcvt_x_f_v_i16m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -70,15 +43,6 @@ return __riscv_vfcvt_x_f_v_i16m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -88,15 +52,6 @@ return __riscv_vfcvt_x_f_v_i16m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -106,15 +61,6 @@ return __riscv_vfcvt_x_f_v_i16m8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i16m8_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -124,15 +70,6 @@ return __riscv_vfcvt_xu_f_v_u16mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -142,15 +79,6 @@ return __riscv_vfcvt_xu_f_v_u16mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -160,15 +88,6 @@ return __riscv_vfcvt_xu_f_v_u16m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -178,15 +97,6 @@ return __riscv_vfcvt_xu_f_v_u16m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tu( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -196,15 +106,6 @@ return __riscv_vfcvt_xu_f_v_u16m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -214,15 +115,6 @@ return __riscv_vfcvt_xu_f_v_u16m8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u16m8_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -340,15 +232,6 @@ return __riscv_vfcvt_x_f_v_i32mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -358,15 +241,6 @@ return __riscv_vfcvt_x_f_v_i32m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -376,15 +250,6 @@ return __riscv_vfcvt_x_f_v_i32m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfcvt_rtz_x_f_v_i32m2_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m2_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -394,15 +259,6 @@
   return __riscv_vfcvt_x_f_v_i32m4_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m4_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -412,15 +268,6 @@
   return __riscv_vfcvt_x_f_v_i32m8_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m8_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -430,15 +277,6 @@
   return __riscv_vfcvt_xu_f_v_u32mf2_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -448,15 +286,6 @@
   return __riscv_vfcvt_xu_f_v_u32m1_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m1_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -466,15 +295,6 @@
   return __riscv_vfcvt_xu_f_v_u32m2_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m2_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -484,15 +304,6 @@
   return __riscv_vfcvt_xu_f_v_u32m4_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m4_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -502,15 +313,6 @@
   return __riscv_vfcvt_xu_f_v_u32m8_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m8_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -610,15 +412,6 @@
   return __riscv_vfcvt_x_f_v_i64m1_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m1_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -628,15 +421,6 @@
   return __riscv_vfcvt_x_f_v_i64m2_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m2_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -646,15 +430,6 @@
   return __riscv_vfcvt_x_f_v_i64m4_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m4_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -664,15 +439,6 @@
   return __riscv_vfcvt_x_f_v_i64m8_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m8_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -682,15 +448,6 @@
   return __riscv_vfcvt_xu_f_v_u64m1_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m1_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -700,15 +457,6 @@
   return __riscv_vfcvt_xu_f_v_u64m2_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m2_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -718,15 +466,6 @@
   return __riscv_vfcvt_xu_f_v_u64m4_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m4_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -736,15 +475,6 @@
   return __riscv_vfcvt_xu_f_v_u64m8_tu(maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m8_tu(maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
@@ -826,15 +556,6 @@
   return __riscv_vfcvt_x_f_v_i16mf4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -844,15 +565,6 @@
   return __riscv_vfcvt_x_f_v_i16mf2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -862,15 +574,6 @@
   return __riscv_vfcvt_x_f_v_i16m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -880,15 +583,6 @@
   return __riscv_vfcvt_x_f_v_i16m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -898,15 +592,6 @@
   return __riscv_vfcvt_x_f_v_i16m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -916,15 +601,6 @@
   return __riscv_vfcvt_x_f_v_i16m8_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m8_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -934,15 +610,6 @@
   return __riscv_vfcvt_xu_f_v_u16mf4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -952,15 +619,6 @@
   return __riscv_vfcvt_xu_f_v_u16mf2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -970,15 +628,6 @@
   return __riscv_vfcvt_xu_f_v_u16m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -988,15 +637,6 @@
   return __riscv_vfcvt_xu_f_v_u16m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1006,15 +646,6 @@
   return __riscv_vfcvt_xu_f_v_u16m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1024,15 +655,6 @@
   return __riscv_vfcvt_xu_f_v_u16m8_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m8_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1150,15 +772,6 @@
   return __riscv_vfcvt_x_f_v_i32mf2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32mf2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1168,15 +781,6 @@
   return __riscv_vfcvt_x_f_v_i32m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1186,15 +790,6 @@
   return __riscv_vfcvt_x_f_v_i32m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1204,15 +799,6 @@
   return __riscv_vfcvt_x_f_v_i32m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1222,15 +808,6 @@
   return __riscv_vfcvt_x_f_v_i32m8_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m8_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1240,15 +817,6 @@
   return __riscv_vfcvt_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1258,15 +826,6 @@
   return __riscv_vfcvt_xu_f_v_u32m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1276,15 +835,6 @@
   return __riscv_vfcvt_xu_f_v_u32m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1294,15 +844,6 @@
   return __riscv_vfcvt_xu_f_v_u32m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1312,15 +853,6 @@
   return __riscv_vfcvt_xu_f_v_u32m8_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m8_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1420,15 +952,6 @@
   return __riscv_vfcvt_x_f_v_i64m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1438,15 +961,6 @@
   return __riscv_vfcvt_x_f_v_i64m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1456,15 +970,6 @@
   return __riscv_vfcvt_x_f_v_i64m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1474,15 +979,6 @@
   return __riscv_vfcvt_x_f_v_i64m8_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m8_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1492,15 +988,6 @@
   return __riscv_vfcvt_xu_f_v_u64m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1510,15 +997,6 @@
   return __riscv_vfcvt_xu_f_v_u64m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1528,15 +1006,6 @@
   return __riscv_vfcvt_xu_f_v_u64m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1546,15 +1015,6 @@
   return __riscv_vfcvt_xu_f_v_u64m8_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tum(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m8_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tum(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1636,15 +1096,6 @@
   return __riscv_vfcvt_x_f_v_i16mf4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1654,15 +1105,6 @@
   return __riscv_vfcvt_x_f_v_i16mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1672,15 +1114,6 @@
   return __riscv_vfcvt_x_f_v_i16m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1690,15 +1123,6 @@
   return __riscv_vfcvt_x_f_v_i16m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1708,15 +1132,6 @@
   return __riscv_vfcvt_x_f_v_i16m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1726,15 +1141,6 @@
   return __riscv_vfcvt_x_f_v_i16m8_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m8_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1744,15 +1150,6 @@
   return __riscv_vfcvt_xu_f_v_u16mf4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1762,31 +1159,13 @@
   return __riscv_vfcvt_xu_f_v_u16mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
 vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_xu_f_v_u16m1_tumu(mask, maskedoff, src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m1_tumu(mask, maskedoff, src, vl);
+  return __riscv_vfcvt_xu_f_v_u16m1_tumu(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tumu(
@@ -1798,15 +1177,6 @@
   return __riscv_vfcvt_xu_f_v_u16m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1816,15 +1186,6 @@
   return __riscv_vfcvt_xu_f_v_u16m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1834,15 +1195,6 @@
   return __riscv_vfcvt_xu_f_v_u16m8_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m8_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1960,15 +1312,6 @@
   return __riscv_vfcvt_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1978,15 +1321,6 @@
   return __riscv_vfcvt_x_f_v_i32m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1996,15 +1330,6 @@
   return __riscv_vfcvt_x_f_v_i32m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2014,15 +1339,6 @@
   return __riscv_vfcvt_x_f_v_i32m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2032,15 +1348,6 @@
   return __riscv_vfcvt_x_f_v_i32m8_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m8_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2050,15 +1357,6 @@
   return __riscv_vfcvt_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2068,15 +1366,6 @@
   return __riscv_vfcvt_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2086,15 +1375,6 @@
   return __riscv_vfcvt_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2104,15 +1384,6 @@
   return __riscv_vfcvt_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2122,15 +1393,6 @@
   return __riscv_vfcvt_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2230,15 +1492,6 @@
   return __riscv_vfcvt_x_f_v_i64m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2248,15 +1501,6 @@
   return __riscv_vfcvt_x_f_v_i64m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2266,15 +1510,6 @@
   return __riscv_vfcvt_x_f_v_i64m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2284,15 +1519,6 @@
   return __riscv_vfcvt_x_f_v_i64m8_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i64m8_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2302,15 +1528,6 @@
   return __riscv_vfcvt_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2320,15 +1537,6 @@
   return __riscv_vfcvt_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2338,15 +1546,6 @@
   return __riscv_vfcvt_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2356,15 +1555,6 @@
   return __riscv_vfcvt_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tumu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tumu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2446,15 +1636,6 @@
   return __riscv_vfcvt_x_f_v_i16mf4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2464,15 +1645,6 @@
   return __riscv_vfcvt_x_f_v_i16mf2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16mf2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2482,15 +1654,6 @@
   return __riscv_vfcvt_x_f_v_i16m1_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m1_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2500,15 +1663,6 @@
   return __riscv_vfcvt_x_f_v_i16m2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2518,15 +1672,6 @@
   return __riscv_vfcvt_x_f_v_i16m4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2536,15 +1681,6 @@
   return __riscv_vfcvt_x_f_v_i16m8_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i16m8_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2554,15 +1690,6 @@
   return __riscv_vfcvt_xu_f_v_u16mf4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16mf4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2572,15 +1699,6 @@
   return __riscv_vfcvt_xu_f_v_u16mf2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16mf2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2590,15 +1708,6 @@
   return __riscv_vfcvt_xu_f_v_u16m1_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m1_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2608,15 +1717,6 @@
   return __riscv_vfcvt_xu_f_v_u16m2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2626,15 +1726,6 @@
   return __riscv_vfcvt_xu_f_v_u16m4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2644,15 +1735,6 @@
   return __riscv_vfcvt_xu_f_v_u16m8_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_xu_f_v_u16m8_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2770,15 +1852,6 @@
   return __riscv_vfcvt_x_f_v_i32mf2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32mf2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2788,15 +1861,6 @@
   return __riscv_vfcvt_x_f_v_i32m1_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfcvt_rtz_x_f_v_i32m1_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2806,15 +1870,6 @@
   return __riscv_vfcvt_x_f_v_i32m2_mu(mask, maskedoff, src, vl);
 }
 
-//
CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2824,15 +1879,6 @@ return __riscv_vfcvt_x_f_v_i32m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2842,15 +1888,6 @@ return __riscv_vfcvt_x_f_v_i32m8_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i32m8_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2860,15 +1897,6 @@ return __riscv_vfcvt_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2878,15 +1906,6 @@ return __riscv_vfcvt_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, 
vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2896,15 +1915,6 @@ return __riscv_vfcvt_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2914,15 +1924,6 @@ return __riscv_vfcvt_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2932,15 +1933,6 @@ return __riscv_vfcvt_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3040,15 +2032,6 @@ return __riscv_vfcvt_x_f_v_i64m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3058,15 
+2041,6 @@ return __riscv_vfcvt_x_f_v_i64m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3076,15 +2050,6 @@ return __riscv_vfcvt_x_f_v_i64m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3094,15 +2059,6 @@ return __riscv_vfcvt_x_f_v_i64m8_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_f_v_i64m8_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3112,15 +2068,6 @@ return __riscv_vfcvt_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3130,15 +2077,6 @@ return __riscv_vfcvt_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t 
test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3148,15 +2086,6 @@ return __riscv_vfcvt_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3166,15 +2095,6 @@ return __riscv_vfcvt_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt_rtz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfcvt_rtz.c @@ -0,0 +1,1089 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return
__riscv_vfcvt_rtz_x_f_v_i16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return 
__riscv_vfcvt_rtz_xu_f_v_u64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t 
test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfcvt_rtz_x_f_v_i32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t 
vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t 
test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfcvt_rtz_x_f_v_i16m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i16m8_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u16m8_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i32m8_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfcvt_rtz_xu_f_v_u32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_f_v_i64m8_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt.c @@ -16,15 +16,6 @@ return __riscv_vfncvt_x_f_w_i8mf8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf8_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -34,15 +25,6 @@ return __riscv_vfncvt_x_f_w_i8mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -52,15 +34,6 @@ return __riscv_vfncvt_x_f_w_i8mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tu( 
-// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -70,15 +43,6 @@ return __riscv_vfncvt_x_f_w_i8m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -88,15 +52,6 @@ return __riscv_vfncvt_x_f_w_i8m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -106,15 +61,6 @@ return __riscv_vfncvt_x_f_w_i8m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -124,15 +70,6 @@ return __riscv_vfncvt_xu_f_w_u8mf8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -142,15 +79,6 @@ return __riscv_vfncvt_xu_f_w_u8mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfncvt_rtz_xu_f_w_u8mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -160,15 +88,6 @@ return __riscv_vfncvt_xu_f_w_u8mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -178,15 +97,6 @@ return __riscv_vfncvt_xu_f_w_u8m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -196,15 +106,6 @@ return __riscv_vfncvt_xu_f_w_u8m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -214,15 +115,6 @@ return __riscv_vfncvt_xu_f_w_u8m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -232,15 +124,6 @@ return 
__riscv_vfncvt_x_f_w_i16mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16mf4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -250,15 +133,6 @@ return __riscv_vfncvt_x_f_w_i16mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -268,15 +142,6 @@ return __riscv_vfncvt_x_f_w_i16m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -286,15 +151,6 @@ return __riscv_vfncvt_x_f_w_i16m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -304,15 +160,6 @@ return __riscv_vfncvt_x_f_w_i16m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i16m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i64 [[VL:%.*]]) @@ -322,15 +169,6 @@ return __riscv_vfncvt_xu_f_w_u16mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -340,15 +178,6 @@ return __riscv_vfncvt_xu_f_w_u16mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -358,15 +187,6 @@ return __riscv_vfncvt_xu_f_w_u16m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -376,15 +196,6 @@ return __riscv_vfncvt_xu_f_w_u16m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -394,15 +205,6 @@ return __riscv_vfncvt_xu_f_w_u16m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tu( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -502,15 +304,6 @@ return __riscv_vfncvt_f_f_w_f16mf4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -520,15 +313,6 @@ return __riscv_vfncvt_f_f_w_f16mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -538,15 +322,6 @@ return __riscv_vfncvt_f_f_w_f16m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -556,15 +331,6 @@ return __riscv_vfncvt_f_f_w_f16m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -574,15 +340,6 @@ return __riscv_vfncvt_f_f_w_f16m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m4_tu(maskedoff, src, vl); -} - // 
CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -592,15 +349,6 @@ return __riscv_vfncvt_x_f_w_i32mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -610,15 +358,6 @@ return __riscv_vfncvt_x_f_w_i32m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -628,15 +367,6 @@ return __riscv_vfncvt_x_f_w_i32m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -646,15 +376,6 @@ return __riscv_vfncvt_x_f_w_i32m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -664,15 +385,6 @@ return __riscv_vfncvt_xu_f_w_u32mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return 
__riscv_vfncvt_rtz_xu_f_w_u32mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -682,15 +394,6 @@ return __riscv_vfncvt_xu_f_w_u32m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -700,15 +403,6 @@ return __riscv_vfncvt_xu_f_w_u32m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -718,15 +412,6 @@ return __riscv_vfncvt_xu_f_w_u32m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -808,15 +493,6 @@ return __riscv_vfncvt_f_f_w_f32mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -826,15 +502,6 @@ return __riscv_vfncvt_f_f_w_f32m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t 
test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -844,15 +511,6 @@ return __riscv_vfncvt_f_f_w_f32m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -862,15 +520,6 @@ return __riscv_vfncvt_f_f_w_f32m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -880,15 +529,6 @@ return __riscv_vfncvt_x_f_w_i8mf8_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf8_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -898,15 +538,6 @@ return __riscv_vfncvt_x_f_w_i8mf4_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf4_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -916,15 +547,6 @@ return __riscv_vfncvt_x_f_w_i8mf2_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfncvt_rtz_x_f_w_i8mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8mf2_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -934,15 +556,6 @@ return __riscv_vfncvt_x_f_w_i8m1_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m1_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -952,15 +565,6 @@ return __riscv_vfncvt_x_f_w_i8m2_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m2_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -970,15 +574,6 @@ return __riscv_vfncvt_x_f_w_i8m4_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i8m4_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -988,15 +583,6 @@ return __riscv_vfncvt_xu_f_w_u8mf8_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t 
src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1006,15 +592,6 @@ return __riscv_vfncvt_xu_f_w_u8mf4_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1024,15 +601,6 @@ return __riscv_vfncvt_xu_f_w_u8mf2_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1042,15 +610,6 @@ return __riscv_vfncvt_xu_f_w_u8m1_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m1_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1060,15 +619,6 @@ return __riscv_vfncvt_xu_f_w_u8m2_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u8m2_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 2)
@@ -1078,15 +628,6 @@
   return __riscv_vfncvt_xu_f_w_u8m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1096,15 +637,6 @@
   return __riscv_vfncvt_x_f_w_i16mf4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1114,15 +646,6 @@
   return __riscv_vfncvt_x_f_w_i16mf2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1132,15 +655,6 @@
   return __riscv_vfncvt_x_f_w_i16m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1150,15 +664,6 @@
   return __riscv_vfncvt_x_f_w_i16m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1168,15 +673,6 @@
   return __riscv_vfncvt_x_f_w_i16m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1186,15 +682,6 @@
   return __riscv_vfncvt_xu_f_w_u16mf4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1204,15 +691,6 @@
   return __riscv_vfncvt_xu_f_w_u16mf2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1222,15 +700,6 @@
   return __riscv_vfncvt_xu_f_w_u16m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1240,15 +709,6 @@
   return __riscv_vfncvt_xu_f_w_u16m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1258,15 +718,6 @@
   return __riscv_vfncvt_xu_f_w_u16m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1366,15 +817,6 @@
   return __riscv_vfncvt_f_f_w_f16mf4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16mf4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1384,15 +826,6 @@
   return __riscv_vfncvt_f_f_w_f16mf2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16mf2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1402,15 +835,6 @@
   return __riscv_vfncvt_f_f_w_f16m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1420,15 +844,6 @@
   return __riscv_vfncvt_f_f_w_f16m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1438,15 +853,6 @@
   return __riscv_vfncvt_f_f_w_f16m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1456,15 +862,6 @@
   return __riscv_vfncvt_x_f_w_i32mf2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32mf2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1474,15 +871,6 @@
   return __riscv_vfncvt_x_f_w_i32m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1492,15 +880,6 @@
   return __riscv_vfncvt_x_f_w_i32m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1510,15 +889,6 @@
   return __riscv_vfncvt_x_f_w_i32m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1528,15 +898,6 @@
   return __riscv_vfncvt_xu_f_w_u32mf2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1546,15 +907,6 @@
   return __riscv_vfncvt_xu_f_w_u32m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1564,15 +916,6 @@
   return __riscv_vfncvt_xu_f_w_u32m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1582,15 +925,6 @@
   return __riscv_vfncvt_xu_f_w_u32m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1672,15 +1006,6 @@
   return __riscv_vfncvt_f_f_w_f32mf2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f32mf2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1690,15 +1015,6 @@
   return __riscv_vfncvt_f_f_w_f32m1_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f32m1_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1708,15 +1024,6 @@
   return __riscv_vfncvt_f_f_w_f32m2_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f32m2_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tum(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
@@ -1726,15 +1033,6 @@
   return __riscv_vfncvt_f_f_w_f32m4_tum(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tum(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f32m4_tum(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1744,15 +1042,6 @@
   return __riscv_vfncvt_x_f_w_i8mf8_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf8_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1762,15 +1051,6 @@
   return __riscv_vfncvt_x_f_w_i8mf4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1780,15 +1060,6 @@
   return __riscv_vfncvt_x_f_w_i8mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1798,15 +1069,6 @@
   return __riscv_vfncvt_x_f_w_i8m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1816,15 +1078,6 @@
   return __riscv_vfncvt_x_f_w_i8m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1834,15 +1087,6 @@
   return __riscv_vfncvt_x_f_w_i8m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1852,15 +1096,6 @@
   return __riscv_vfncvt_xu_f_w_u8mf8_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1870,15 +1105,6 @@
   return __riscv_vfncvt_xu_f_w_u8mf4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1888,15 +1114,6 @@
   return __riscv_vfncvt_xu_f_w_u8mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1906,15 +1123,6 @@
   return __riscv_vfncvt_xu_f_w_u8m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1924,15 +1132,6 @@
   return __riscv_vfncvt_xu_f_w_u8m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1942,15 +1141,6 @@
   return __riscv_vfncvt_xu_f_w_u8m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1960,15 +1150,6 @@
   return __riscv_vfncvt_x_f_w_i16mf4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1978,15 +1159,6 @@
   return __riscv_vfncvt_x_f_w_i16mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -1996,15 +1168,6 @@
   return __riscv_vfncvt_x_f_w_i16m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2014,15 +1177,6 @@
   return __riscv_vfncvt_x_f_w_i16m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2032,15 +1186,6 @@
   return __riscv_vfncvt_x_f_w_i16m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2050,15 +1195,6 @@
   return __riscv_vfncvt_xu_f_w_u16mf4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2068,15 +1204,6 @@
   return __riscv_vfncvt_xu_f_w_u16mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2086,15 +1213,6 @@
   return __riscv_vfncvt_xu_f_w_u16m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2104,15 +1222,6 @@
   return __riscv_vfncvt_xu_f_w_u16m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2122,15 +1231,6 @@
   return __riscv_vfncvt_xu_f_w_u16m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2230,15 +1330,6 @@
   return __riscv_vfncvt_f_f_w_f16mf4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16mf4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2248,15 +1339,6 @@
   return __riscv_vfncvt_f_f_w_f16mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2266,15 +1348,6 @@
   return __riscv_vfncvt_f_f_w_f16m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2284,31 +1357,13 @@
   return __riscv_vfncvt_f_f_w_f16m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
 vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_f_f_w_f16m4_tumu(mask, maskedoff, src, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f16m4_tumu(mask, maskedoff, src, vl);
+  return __riscv_vfncvt_f_f_w_f16m4_tumu(mask, maskedoff, src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tumu(
@@ -2320,15 +1375,6 @@
   return __riscv_vfncvt_x_f_w_i32mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2338,15 +1384,6 @@
   return __riscv_vfncvt_x_f_w_i32m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2356,15 +1393,6 @@
   return __riscv_vfncvt_x_f_w_i32m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2374,15 +1402,6 @@
   return __riscv_vfncvt_x_f_w_i32m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i32m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2392,15 +1411,6 @@
   return __riscv_vfncvt_xu_f_w_u32mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2410,15 +1420,6 @@
   return __riscv_vfncvt_xu_f_w_u32m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2428,15 +1429,6 @@
   return __riscv_vfncvt_xu_f_w_u32m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2446,15 +1438,6 @@
   return __riscv_vfncvt_xu_f_w_u32m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u32m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2536,15 +1519,6 @@
   return __riscv_vfncvt_f_f_w_f32mf2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f32mf2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2554,15 +1528,6 @@
   return __riscv_vfncvt_f_f_w_f32m1_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f32m1_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2572,15 +1537,6 @@
   return __riscv_vfncvt_f_f_w_f32m2_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f32m2_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tumu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -2590,15 +1546,6 @@
   return __riscv_vfncvt_f_f_w_f32m4_tumu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tumu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return __riscv_vfncvt_rod_f_f_w_f32m4_tumu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2608,15 +1555,6 @@
   return __riscv_vfncvt_x_f_w_i8mf8_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf8_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2626,15 +1564,6 @@
   return __riscv_vfncvt_x_f_w_i8mf4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2644,15 +1573,6 @@
   return __riscv_vfncvt_x_f_w_i8mf2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8mf2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2662,15 +1582,6 @@
   return __riscv_vfncvt_x_f_w_i8m1_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m1_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2680,15 +1591,6 @@
   return __riscv_vfncvt_x_f_w_i8m2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2698,15 +1600,6 @@
   return __riscv_vfncvt_x_f_w_i8m4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i8m4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2716,15 +1609,6 @@
   return __riscv_vfncvt_xu_f_w_u8mf8_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf8_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2734,15 +1618,6 @@
   return __riscv_vfncvt_xu_f_w_u8mf4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2752,15 +1627,6 @@
   return __riscv_vfncvt_xu_f_w_u8mf2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8mf2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2770,15 +1636,6 @@
   return __riscv_vfncvt_xu_f_w_u8m1_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m1_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2788,15 +1645,6 @@
   return __riscv_vfncvt_xu_f_w_u8m2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2806,15 +1654,6 @@
   return __riscv_vfncvt_xu_f_w_u8m4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u8m4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2824,15 +1663,6 @@
   return __riscv_vfncvt_x_f_w_i16mf4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2842,15 +1672,6 @@
   return __riscv_vfncvt_x_f_w_i16mf2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16mf2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2860,15 +1681,6 @@
   return __riscv_vfncvt_x_f_w_i16m1_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m1_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2878,15 +1690,6 @@
   return __riscv_vfncvt_x_f_w_i16m2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2896,15 +1699,6 @@
   return __riscv_vfncvt_x_f_w_i16m4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_x_f_w_i16m4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2914,15 +1708,6 @@
   return __riscv_vfncvt_xu_f_w_u16mf4_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf4_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2932,15 +1717,6 @@
   return __riscv_vfncvt_xu_f_w_u16mf2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16mf2_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2950,15 +1726,6 @@
   return __riscv_vfncvt_xu_f_w_u16m1_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return __riscv_vfncvt_rtz_xu_f_w_u16m1_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_mu(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -2968,15 +1735,6 @@
   return __riscv_vfncvt_xu_f_w_u16m2_mu(mask, maskedoff, src, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_mu(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT:    ret [[TMP0]]
-//
-vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff,
vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2986,15 +1744,6 @@ return __riscv_vfncvt_xu_f_w_u16m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u16m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3094,15 +1843,6 @@ return __riscv_vfncvt_f_f_w_f16mf4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3112,15 +1852,6 @@ return __riscv_vfncvt_f_f_w_f16mf2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16mf2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3130,15 +1861,6 @@ return __riscv_vfncvt_f_f_w_f16m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3148,15 +1870,6 @@ return __riscv_vfncvt_f_f_w_f16m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3166,15 +1879,6 @@ return __riscv_vfncvt_f_f_w_f16m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f16m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3184,15 +1888,6 @@ return __riscv_vfncvt_x_f_w_i32mf2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32mf2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3202,15 +1897,6 @@ return __riscv_vfncvt_x_f_w_i32m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3220,15 +1906,6 @@ return __riscv_vfncvt_x_f_w_i32m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3238,15 +1915,6 @@ return __riscv_vfncvt_x_f_w_i32m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_f_w_i32m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3256,15 +1924,6 @@ return __riscv_vfncvt_xu_f_w_u32mf2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32mf2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3274,15 +1933,6 @@ return __riscv_vfncvt_xu_f_w_u32m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3292,15 +1942,6 @@ return __riscv_vfncvt_xu_f_w_u32m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_mu( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3310,15 +1951,6 @@ return __riscv_vfncvt_xu_f_w_u32m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_f_w_u32m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3400,15 +2032,6 @@ return __riscv_vfncvt_f_f_w_f32mf2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32mf2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3418,15 +2041,6 @@ return __riscv_vfncvt_f_f_w_f32m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3436,15 +2050,6 @@ return __riscv_vfncvt_f_f_w_f32m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3454,12 +2059,3 @@ return __riscv_vfncvt_f_f_w_f32m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_mu( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_f_w_f32m4_mu(mask, maskedoff, src, vl); -} - diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rod.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rod.c @@ -0,0 +1,333 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]]
+// +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
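+// Note (reader's aid, read off the assertions in this file, not autogenerated):
+// the trailing i64 operand on these masked calls is the policy value. The _tum
+// tests check i64 2 (tail undisturbed, mask agnostic), the _tumu tests check
+// i64 0 (tail and mask both undisturbed), and the _mu tests check i64 1 (tail
+// agnostic, mask undisturbed); the _tu tests lower to the unmasked intrinsic
+// and instead pass maskedoff as the leading passthru operand.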
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf4_mu(mask, 
maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f16m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_f32m4_mu(mask, maskedoff, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rtz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfncvt_rtz.c @@ -0,0 +1,1089 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m4_tu(maskedoff, src, vl); +} +
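The `_tu` tests above all share one shape: the first C argument is the tail passthru, and the checked IR call receives it as the leading operand. As a reader's aid, here is a minimal, hypothetical usage sketch of one intrinsic exercised in this file, `__riscv_vfncvt_rtz_x_f_w_i32m1_tu`; the strip-mining loop and buffer names are illustrative assumptions, not part of the test suite:

#include <stdint.h>
#include <stddef.h>
#include <riscv_vector.h>

/* Truncate doubles to int32 with round-toward-zero. The _tu form keeps the
 * elements of `prev` at indices >= vl in the result register (tail
 * undisturbed); since only vl elements are stored back here, the call mainly
 * illustrates the operand order: (passthru, wide source, vl). */
void truncate_f64_to_i32(const double *src, int32_t *dst, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e64m2(n);                /* elements this pass */
    vfloat64m2_t wide = __riscv_vle64_v_f64m2(src, vl); /* load wide source */
    vint32m1_t prev = __riscv_vle32_v_i32m1(dst, vl);   /* current dst values */
    vint32m1_t narrow = __riscv_vfncvt_rtz_x_f_w_i32m1_tu(prev, wide, vl);
    __riscv_vse32_v_i32m1(dst, narrow, vl);             /* store vl results */
    src += vl;
    dst += vl;
    n -= vl;
  }
}

The masked policy variants tested later in this file follow the same pattern but take a vbool mask argument ahead of the passthru.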
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, 
vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tum( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
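+// Note (reader's aid, read off the assertion above, not autogenerated): Clang
+// reorders the C argument list (mask, maskedoff, src, vl) of these _tum
+// intrinsics into the IR operand order (maskedoff, src, mask, vl) and appends
+// the policy constant i64 2 when lowering to the @llvm.riscv.vfncvt.*.mask
+// intrinsics.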
+vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32mf2_tum(mask, maskedoff, 
src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, 
size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tumu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return 
__riscv_vfncvt_rtz_xu_f_w_u32m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf8_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i8m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf8_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u8m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + 
return __riscv_vfncvt_rtz_x_f_w_i16m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i16m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u16m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_i32m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t 
mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_u32m4_mu(mask, maskedoff, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt.c @@ -124,15 +124,6 @@ return __riscv_vfwcvt_x_f_v_i32mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -142,15 +133,6 @@ return __riscv_vfwcvt_x_f_v_i32m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -160,15 +142,6 @@ return __riscv_vfwcvt_x_f_v_i32m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -178,15 +151,6 @@ return __riscv_vfwcvt_x_f_v_i32m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -196,15 +160,6 @@ return __riscv_vfwcvt_x_f_v_i32m8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tu( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m8_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -214,15 +169,6 @@ return __riscv_vfwcvt_xu_f_v_u32mf2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -232,15 +178,6 @@ return __riscv_vfwcvt_xu_f_v_u32m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -250,15 +187,6 @@ return __riscv_vfwcvt_xu_f_v_u32m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -268,15 +196,6 @@ return __riscv_vfwcvt_xu_f_v_u32m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -286,15 +205,6 @@ return __riscv_vfwcvt_xu_f_v_u32m8_tu(maskedoff, src, vl); } -// 
CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -439,15 +349,6 @@ return __riscv_vfwcvt_x_f_v_i64m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -457,15 +358,6 @@ return __riscv_vfwcvt_x_f_v_i64m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -475,15 +367,6 @@ return __riscv_vfwcvt_x_f_v_i64m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -493,15 +376,6 @@ return __riscv_vfwcvt_x_f_v_i64m8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m8_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -511,15 +385,6 @@ return 
__riscv_vfwcvt_xu_f_v_u64m1_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -529,15 +394,6 @@ return __riscv_vfwcvt_xu_f_v_u64m2_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -547,15 +403,6 @@ return __riscv_vfwcvt_xu_f_v_u64m4_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -565,15 +412,6 @@ return __riscv_vfwcvt_xu_f_v_u64m8_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -799,15 +637,6 @@ return __riscv_vfwcvt_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -817,15 +646,6 @@ return __riscv_vfwcvt_x_f_v_i32m1_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m1_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -835,15 +655,6 @@ return __riscv_vfwcvt_x_f_v_i32m2_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m2_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -853,15 +664,6 @@ return __riscv_vfwcvt_x_f_v_i32m4_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m4_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -871,15 +673,6 @@ return __riscv_vfwcvt_x_f_v_i32m8_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m8_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -889,15 +682,6 @@ return __riscv_vfwcvt_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -907,15 +691,6 @@ return __riscv_vfwcvt_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -925,15 +700,6 @@ return __riscv_vfwcvt_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -943,15 +709,6 @@ return __riscv_vfwcvt_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -961,15 +718,6 @@ return __riscv_vfwcvt_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - 
return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1114,15 +862,6 @@ return __riscv_vfwcvt_x_f_v_i64m1_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m1_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1132,15 +871,6 @@ return __riscv_vfwcvt_x_f_v_i64m2_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m2_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1150,15 +880,6 @@ return __riscv_vfwcvt_x_f_v_i64m4_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m4_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1168,15 +889,6 @@ return __riscv_vfwcvt_x_f_v_i64m8_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m8_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1186,15 +898,6 @@ 
return __riscv_vfwcvt_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1204,15 +907,6 @@ return __riscv_vfwcvt_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1222,15 +916,6 @@ return __riscv_vfwcvt_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1240,15 +925,6 @@ return __riscv_vfwcvt_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1474,15 +1150,6 @@ return __riscv_vfwcvt_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1492,15 +1159,6 @@ return __riscv_vfwcvt_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1510,15 +1168,6 @@ return __riscv_vfwcvt_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1528,15 +1177,6 @@ return __riscv_vfwcvt_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1546,15 +1186,6 @@ return __riscv_vfwcvt_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tumu( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1564,15 +1195,6 @@ return __riscv_vfwcvt_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1582,15 +1204,6 @@ return __riscv_vfwcvt_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1600,15 +1213,6 @@ return __riscv_vfwcvt_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1618,15 +1222,6 @@ return __riscv_vfwcvt_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1636,15 +1231,6 @@ return __riscv_vfwcvt_xu_f_v_u32m8_tumu(mask, maskedoff, src, 
vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1789,15 +1375,6 @@ return __riscv_vfwcvt_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1807,15 +1384,6 @@ return __riscv_vfwcvt_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1825,15 +1393,6 @@ return __riscv_vfwcvt_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1843,15 +1402,6 @@ return __riscv_vfwcvt_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t 
test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1861,15 +1411,6 @@ return __riscv_vfwcvt_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1879,15 +1420,6 @@ return __riscv_vfwcvt_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1897,15 +1429,6 @@ return __riscv_vfwcvt_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1915,15 +1438,6 @@ return __riscv_vfwcvt_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2149,15 +1663,6 @@ return __riscv_vfwcvt_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2167,15 +1672,6 @@ return __riscv_vfwcvt_x_f_v_i32m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2185,15 +1681,6 @@ return __riscv_vfwcvt_x_f_v_i32m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2203,15 +1690,6 @@ return __riscv_vfwcvt_x_f_v_i32m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2221,15 +1699,6 @@ return __riscv_vfwcvt_x_f_v_i32m8_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i32m8_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2239,15 +1708,6 @@ return __riscv_vfwcvt_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2257,15 +1717,6 @@ return __riscv_vfwcvt_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2275,15 +1726,6 @@ return __riscv_vfwcvt_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2293,15 +1735,6 @@ return __riscv_vfwcvt_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return 
__riscv_vfwcvt_rtz_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2311,15 +1744,6 @@ return __riscv_vfwcvt_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2464,15 +1888,6 @@ return __riscv_vfwcvt_x_f_v_i64m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2482,15 +1897,6 @@ return __riscv_vfwcvt_x_f_v_i64m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2500,15 +1906,6 @@ return __riscv_vfwcvt_x_f_v_i64m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2518,15 +1915,6 @@ return 
__riscv_vfwcvt_x_f_v_i64m8_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_f_v_i64m8_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2536,15 +1924,6 @@ return __riscv_vfwcvt_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2554,15 +1933,6 @@ return __riscv_vfwcvt_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2572,15 +1942,6 @@ return __riscv_vfwcvt_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2590,15 +1951,6 @@ return __riscv_vfwcvt_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// 
CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt_rtz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfwcvt_rtz.c @@ -0,0 +1,657 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +//
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, 
size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t 
test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m1_tum(mask, maskedoff, src, vl); +} + +// 
CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, 
vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tumu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i32m8_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u32m8_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m2_mu(mask, maskedoff, 
src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_f_v_i64m8_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m1_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m2_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m4_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_f_v_u64m8_mu(mask, maskedoff, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext_vf2.c rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext_vf2.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext_vf2.c @@ -60,87 +60,6 @@ return __riscv_vsext_vf2_i16m8_tu(maskedoff, op1, vl); } -// CHECK-RV64-LABEL: 
@test_vsext_vf4_i32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_i32mf2_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m1_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m2_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m8_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m1_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m2_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m8_tu(maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tu( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) @@ -186,42 +105,6 @@ return __riscv_vsext_vf2_i32m8_tu(maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m1_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m2_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m8_tu(maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) @@ -312,87 +195,6 @@ return __riscv_vsext_vf2_i16m8_tum(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_i32mf2_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m1_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m2_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m8_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m1_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m2_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m8_tum(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -438,42 +240,6 @@ return __riscv_vsext_vf2_i32m8_tum(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m1_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// 
-vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m2_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m8_tum(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -564,87 +330,6 @@ return __riscv_vsext_vf2_i16m8_tumu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_i32mf2_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m1_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m2_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return 
__riscv_vsext_vf4_i32m8_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m1_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m2_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m8_tumu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -690,42 +375,6 @@ return __riscv_vsext_vf2_i32m8_tumu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m1_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m2_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tumu( -// 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m8_tumu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -816,87 +465,6 @@ return __riscv_vsext_vf2_i16m8_mu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_i32mf2_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m1_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m2_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i32m8_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m1_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m2_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_i64m8_mu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -942,42 +510,6 @@ return __riscv_vsext_vf2_i32m8_mu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m1_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m2_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_i64m8_mu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext_vf4.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext_vf4.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext_vf4.c @@ -0,0 +1,332 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf4_i32mf2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m1_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m1_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t
test_vsext_vf4_i64m4_tu(vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf4_i32mf2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m1_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m1_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { + 
return __riscv_vsext_vf4_i64m2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf4_i32mf2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m1_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { + 
return __riscv_vsext_vf4_i64m1_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf4_i32mf2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m1_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i32m4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { + return 
__riscv_vsext_vf4_i32m8_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m1_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { + return __riscv_vsext_vf4_i64m8_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext_vf8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext_vf8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vsext_vf8.c @@ -0,0 +1,152 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m1_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff,
vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m1_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m1_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m4_tumu(mask, 
maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m1_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf8_i64m8_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext_vf2.c rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext_vf2.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext_vf2.c @@ -60,87 +60,6 @@ return __riscv_vzext_vf2_u16m8_tu(maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_u32mf2_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t 
maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m1_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m2_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m8_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m1_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m2_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m8_tu(maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) @@ -186,42 +105,6 @@ return __riscv_vzext_vf2_u32m8_tu(maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m1_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vzext_vf4_u64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m2_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m8_tu(maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) @@ -312,87 +195,6 @@ return __riscv_vzext_vf2_u16m8_tum(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_u32mf2_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m1_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m2_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t mask, 
vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m8_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m1_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m2_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m8_tum(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -438,42 +240,6 @@ return __riscv_vzext_vf2_u32m8_tum(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m1_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m2_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m4_tum(mask, maskedoff, op1, vl); -} - -// 
CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m8_tum(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -564,87 +330,6 @@ return __riscv_vzext_vf2_u16m8_tumu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_u32mf2_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m1_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m2_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m8_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m1_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m2_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m8_tumu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -690,42 +375,6 @@ return __riscv_vzext_vf2_u32m8_tumu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m1_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m2_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m8_tumu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) @@ -816,87 +465,6 @@ return __riscv_vzext_vf2_u16m8_mu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_u32mf2_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m1_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m2_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u32m8_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m1_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m2_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, 
size_t vl) { - return __riscv_vzext_vf8_u64m4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_u64m8_mu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -942,42 +510,6 @@ return __riscv_vzext_vf2_u32m8_mu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m1_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m2_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_u64m8_mu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext_vf4.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext_vf4.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext_vf4.c @@ -0,0 +1,332 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tu( +// CHECK-RV64-NEXT:
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf4_u32mf2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m1_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m1_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tum( +// CHECK-RV64-NEXT: 
entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf4_u32mf2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m1_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m1_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tum( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf4_u32mf2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m1_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m1_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: 
@test_vzext_vf4_u64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf4_u32mf2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m1_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u32m8_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m1_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: 
@test_vzext_vf4_u64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { + return __riscv_vzext_vf4_u64m8_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext_vf8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext_vf8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vzext_vf8.c @@ -0,0 +1,152 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m1_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL:
@test_vzext_vf8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m1_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m1_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m8_tumu(mask, maskedoff, op1, vl); +} + +// 
CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m1_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8_u64m8_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt.c @@ -16,15 +16,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -34,15 +25,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -52,15 +34,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, 
vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -70,15 +43,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -88,15 +52,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -106,15 +61,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -124,15 +70,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -142,15 +79,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tu( -// CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -160,15 +88,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -178,15 +97,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -196,15 +106,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -214,15 +115,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -340,15 +232,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -358,15 +241,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -376,15 +250,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -394,15 +259,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -412,15 +268,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -430,15 +277,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 
[[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -448,15 +286,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -466,15 +295,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -484,15 +304,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -502,15 +313,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -610,15 +412,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// 
-vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -628,15 +421,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -646,15 +430,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -664,15 +439,6 @@ return __riscv_vfcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -682,15 +448,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -700,15 +457,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, 
size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -718,15 +466,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -736,15 +475,6 @@ return __riscv_vfcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -826,15 +556,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -844,15 +565,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -862,15 +574,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -880,15 +583,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -898,15 +592,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -916,15 +601,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -934,15 +610,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -952,15 +619,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -970,15 +628,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -988,15 +637,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1006,15 +646,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1024,15 +655,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret 
[[TMP0]] -// -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1150,15 +772,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1168,15 +781,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1186,15 +790,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1204,15 +799,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1222,15 
+808,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1240,15 +817,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1258,15 +826,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1276,15 +835,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1294,15 +844,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, 
vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1312,15 +853,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1420,15 +952,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1438,15 +961,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1456,15 +970,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1474,15 +979,6 @@ return __riscv_vfcvt_x_tum(mask, maskedoff, src, vl); } -// 
CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1492,15 +988,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1510,15 +997,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1528,15 +1006,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1546,15 +1015,6 @@ return __riscv_vfcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return 
__riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1636,15 +1096,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1654,15 +1105,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1672,15 +1114,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1690,15 +1123,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1708,15 +1132,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfcvt_rtz_x_f_v_i16m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1726,15 +1141,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1744,15 +1150,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1762,31 +1159,13 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); -} - -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret 
[[TMP0]] -// -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); + return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_tumu( @@ -1798,15 +1177,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1816,15 +1186,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1834,15 +1195,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1960,15 +1312,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1978,15 +1321,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// 
CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1996,15 +1330,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2014,15 +1339,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2032,15 +1348,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2050,15 +1357,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return 
__riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2068,15 +1366,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2086,15 +1375,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2104,15 +1384,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2122,15 +1393,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2230,15 +1492,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// 
CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2248,15 +1501,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2266,15 +1510,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2284,15 +1519,6 @@ return __riscv_vfcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2302,15 +1528,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return 
__riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2320,15 +1537,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2338,15 +1546,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2356,15 +1555,6 @@ return __riscv_vfcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2446,15 +1636,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2464,15 +1645,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfcvt_rtz_x_f_v_i16mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2482,15 +1654,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2500,15 +1663,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2518,15 +1672,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2536,15 +1681,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // 
CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2554,15 +1690,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2572,15 +1699,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2590,15 +1708,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2608,15 +1717,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2626,15 +1726,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2644,15 +1735,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2770,15 +1852,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2788,15 +1861,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2806,15 +1870,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4_mu( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2824,15 +1879,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2842,15 +1888,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2860,15 +1897,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2878,15 +1906,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2896,15 +1915,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2914,15 +1924,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2932,15 +1933,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3040,15 +2032,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3058,15 +2041,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3076,15 +2050,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3094,15 +2059,6 @@ return __riscv_vfcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3112,15 +2068,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3130,15 +2077,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3148,15 +2086,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t 
test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + // CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3166,15 +2095,6 @@ return __riscv_vfcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt_rtz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfcvt_rtz.c @@ -0,0 +1,1089 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +//
CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 
[[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfcvt_rtz_xu_f_v_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return 
__riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_tumu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t 
test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_tumu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, 
vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, 
maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} +
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt.c
@@ -16,15 +16,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfncvt_rtz_x_f_w_i8mf8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -34,15 +25,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -52,15 +34,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -70,15 +43,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -88,15 +52,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -106,15 +61,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -124,15 +70,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -142,15 +79,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -160,15 +88,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -178,15 +97,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -196,15 +106,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -214,15 +115,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -232,15 +124,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -250,15 +133,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -268,15 +142,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -286,15 +151,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -304,15 +160,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -322,15 +169,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -340,15 +178,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -358,15 +187,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -376,15 +196,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -394,15 +205,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -502,15 +304,6 @@ return __riscv_vfncvt_f_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -520,15 +313,6 @@ return __riscv_vfncvt_f_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -538,15 +322,6 @@ return __riscv_vfncvt_f_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -556,15 +331,6 @@ return __riscv_vfncvt_f_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) 
-// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -574,15 +340,6 @@ return __riscv_vfncvt_f_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -592,15 +349,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -610,15 +358,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -628,15 +367,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -646,15 +376,6 @@ return __riscv_vfncvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t 
test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -664,15 +385,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -682,15 +394,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -700,15 +403,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -718,15 +412,6 @@ return __riscv_vfncvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -808,15 +493,6 @@ return __riscv_vfncvt_f_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t 
test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -826,15 +502,6 @@ return __riscv_vfncvt_f_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -844,15 +511,6 @@ return __riscv_vfncvt_f_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -862,15 +520,6 @@ return __riscv_vfncvt_f_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -880,15 +529,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -898,15 +538,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -916,15 +547,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -934,15 +556,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -952,15 +565,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -970,15 +574,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -988,15 +583,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1006,15 +592,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1024,15 +601,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1042,15 +610,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1060,15 +619,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1078,15 +628,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1096,15 +637,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1114,15 +646,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1132,15 +655,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1150,15 +664,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1168,15 +673,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1186,15 +682,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1204,15 +691,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1222,15 +700,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1240,15 +709,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1258,15 +718,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1366,15 +817,6 @@ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1384,15 +826,6 @@ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1402,15 +835,6 @@ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1420,15 +844,6 @@ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1438,15 +853,6 @@ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1456,15 +862,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1474,15 +871,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1492,15 +880,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1510,15 +889,6 @@ return __riscv_vfncvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1528,15 +898,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1546,15 +907,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1564,15 +916,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1582,15 +925,6 @@ return __riscv_vfncvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1672,15 +1006,6 @@ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1690,15 +1015,6 @@ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1708,15 +1024,6 @@ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1726,15 +1033,6 @@ return __riscv_vfncvt_f_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1744,15 +1042,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1762,15 +1051,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1780,15 +1060,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1798,15 +1069,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1816,15 +1078,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1834,15 +1087,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1852,15 +1096,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1870,15 +1105,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1888,15 +1114,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1906,15 +1123,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1924,15 +1132,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1942,15 +1141,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_tumu( // CHECK-RV64-NEXT: entry: // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1960,15 +1150,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1978,15 +1159,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1996,15 +1168,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2014,15 +1177,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2032,15 +1186,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2050,15 +1195,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2068,15 +1204,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2086,15 +1213,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2104,15 +1222,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // 
CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2122,15 +1231,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2230,15 +1330,6 @@ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2248,15 +1339,6 @@ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2266,15 +1348,6 @@ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2284,31 +1357,13 @@ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfncvt_rod_f_f_w_f16m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); -} - -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); + return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_tumu( @@ -2320,15 +1375,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2338,15 +1384,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2356,15 +1393,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - 
return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2374,15 +1402,6 @@ return __riscv_vfncvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2392,15 +1411,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2410,15 +1420,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2428,15 +1429,6 @@ return __riscv_vfncvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2446,15 +1438,6 @@ return __riscv_vfncvt_xu_tumu(mask, 
maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2536,15 +1519,6 @@ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2554,15 +1528,6 @@ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2572,15 +1537,6 @@ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2590,15 +1546,6 @@ return __riscv_vfncvt_f_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, 
vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2608,15 +1555,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2626,15 +1564,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2644,15 +1573,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2662,15 +1582,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2680,15 +1591,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// 
CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2698,15 +1600,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2716,15 +1609,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2734,15 +1618,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2752,15 +1627,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { - return 
__riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2770,15 +1636,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2788,15 +1645,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2806,15 +1654,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2824,15 +1663,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2842,15 +1672,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfncvt_rtz_x_f_w_i16mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2860,15 +1681,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2878,15 +1690,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2896,15 +1699,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2914,15 +1708,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, 
maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2932,15 +1717,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2950,15 +1726,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2968,15 +1735,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2986,15 +1744,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3094,15 +1843,6 @@ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfncvt_rod_f_f_w_f16mf4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3112,15 +1852,6 @@ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3130,15 +1861,6 @@ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3148,15 +1870,6 @@ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3166,15 +1879,6 @@ return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { - return 
__riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3184,15 +1888,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3202,15 +1897,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3220,15 +1906,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3238,15 +1915,6 @@ return __riscv_vfncvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -3256,15 +1924,6 @@ return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_mu( 
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -3274,15 +1933,6 @@
 return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
 }

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -3292,15 +1942,6 @@
 return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
 }

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -3310,15 +1951,6 @@
 return __riscv_vfncvt_xu_mu(mask, maskedoff, src, vl);
 }

-// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -3400,15 +2032,6 @@
 return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
 }

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -3418,15 +2041,6 @@
 return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
 }

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -3436,15 +2050,6 @@
 return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
 }

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4_mu(
 // CHECK-RV64-NEXT: entry:
 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
@@ -3454,12 +2059,3 @@
 return __riscv_vfncvt_f_mu(mask, maskedoff, src, vl);
 }

-// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_mu(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
-// CHECK-RV64-NEXT: ret [[TMP0]]
-//
-vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
- return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
-}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rod.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,333 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tum(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_tumu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_mu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_mu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1_mu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2_mu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4_mu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_mu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1_mu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2_mu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4_mu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
+ return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rtz.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,1089 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) {
+ return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tu(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t maskedoff, vfloat16m4_t src, size_t 
vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t 
maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tum( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t mask, 
vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + 
return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t mask, vuint16m2_t 
maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return 
__riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: 
@test_vfncvt_rtz_xu_f_w_u16m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t 
test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt.c @@ -124,15 +124,6 @@ return __riscv_vfwcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -142,15 +133,6 @@ return __riscv_vfwcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -160,15 +142,6 @@ return __riscv_vfwcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -178,15 +151,6 @@ return __riscv_vfwcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tu(maskedoff, 
src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -196,15 +160,6 @@ return __riscv_vfwcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -214,15 +169,6 @@ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -232,15 +178,6 @@ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -250,15 +187,6 @@ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -268,15 +196,6 @@ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); -} - // 
CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -286,15 +205,6 @@ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -439,15 +349,6 @@ return __riscv_vfwcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -457,15 +358,6 @@ return __riscv_vfwcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -475,15 +367,6 @@ return __riscv_vfwcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -493,15 +376,6 @@ return __riscv_vfwcvt_x_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tu( // 
CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -511,15 +385,6 @@ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -529,15 +394,6 @@ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -547,15 +403,6 @@ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -565,15 +412,6 @@ return __riscv_vfwcvt_xu_tu(maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) @@ -799,15 +637,6 @@ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: 
@test_vfwcvt_x_f_v_i32m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -817,15 +646,6 @@ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -835,15 +655,6 @@ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -853,15 +664,6 @@ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -871,15 +673,6 @@ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -889,15 +682,6 @@ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -907,15 +691,6 @@ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -925,15 +700,6 @@ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -943,15 +709,6 @@ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -961,15 +718,6 @@ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: 
@test_vfwcvt_f_x_v_f32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1114,15 +862,6 @@ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1132,15 +871,6 @@ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1150,15 +880,6 @@ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1168,15 +889,6 @@ return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1186,15 +898,6 @@ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1204,15 +907,6 @@ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1222,15 +916,6 @@ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1240,15 +925,6 @@ return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -1474,15 +1150,6 @@ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: 
@test_vfwcvt_x_f_v_i32m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1492,15 +1159,6 @@ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1510,15 +1168,6 @@ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1528,15 +1177,6 @@ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1546,15 +1186,6 @@ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1564,15 +1195,6 @@ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1582,15 +1204,6 @@ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1600,15 +1213,6 @@ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1618,15 +1222,6 @@ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1636,15 +1231,6 @@ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return 
__riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1789,15 +1375,6 @@ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1807,15 +1384,6 @@ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1825,15 +1393,6 @@ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1843,15 +1402,6 @@ return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1861,15 +1411,6 @@ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl); } -// 
CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1879,15 +1420,6 @@ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1897,15 +1429,6 @@ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -1915,15 +1438,6 @@ return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -2149,15 +1663,6 @@ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, 
vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2167,15 +1672,6 @@ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2185,15 +1681,6 @@ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2203,15 +1690,6 @@ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2221,15 +1699,6 @@ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2239,15 +1708,6 @@ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); } -// 
CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2257,15 +1717,6 @@ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2275,15 +1726,6 @@ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2293,15 +1735,6 @@ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2311,15 +1744,6 @@ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t 
vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2464,15 +1888,6 @@ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2482,15 +1897,6 @@ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2500,15 +1906,6 @@ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2518,15 +1915,6 @@ return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2536,15 +1924,6 @@ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: 
@test_vfwcvt_rtz_xu_f_v_u64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2554,15 +1933,6 @@ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2572,15 +1942,6 @@ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -2590,15 +1951,6 @@ return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl); } -// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); -} - // CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt_rtz.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfwcvt_rtz.c @@ -0,0 +1,657 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v 
-target-feature +zfh \ +// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl);
+} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t mask, 
vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tum(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t 
maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_tumu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return 
__riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_x_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_rtz_xu_mu(mask, maskedoff, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext_vf2.c rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext.c rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext_vf2.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext_vf2.c @@ -60,87 +60,6 @@ return __riscv_vsext_vf2_tu(maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret 
[[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_tu(maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) @@ -186,42 +105,6 @@ return __riscv_vsext_vf2_tu(maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_tu(maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tu( 
// CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) @@ -312,87 +195,6 @@ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// 
-vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -438,42 +240,6 @@ return __riscv_vsext_vf2_tum(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -564,87 +330,6 @@ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -690,42 +375,6 @@ return __riscv_vsext_vf2_tumu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -816,87 +465,6 @@ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vsext_vf4_i32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { - return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { - return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { - return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { - return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { - return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -942,42 +510,6 @@ return __riscv_vsext_vf2_mu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { - return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// 
CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { - return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { - return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { - return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext_vf4.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext_vf4.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext_vf4.c @@ -0,0 +1,332 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1_tu(vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_tu(vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_tu(vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tu( +// CHECK-RV64-NEXT: entry: +//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_tu(vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { + return __riscv_vsext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_tu(vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_tu(vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4_tu(vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { + return __riscv_vsext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { + return __riscv_vsext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { + return 
__riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { + return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { + return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { + return __riscv_vsext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); +} + 
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { + return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { + return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { + return __riscv_vsext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_mu( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vint8m2_t op1, size_t vl) { + return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint16mf4_t op1, size_t vl) { + return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint16mf2_t op1, size_t vl) { + return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint16m1_t op1, size_t vl) { + return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint16m2_t op1, size_t vl) { + return __riscv_vsext_vf4_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext_vf8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext_vf8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vsext_vf8.c @@ -0,0 +1,152 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ 
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_tu(vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_tu(vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_tu(vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64
[[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vint8mf8_t op1, size_t vl) { + return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vint8mf4_t op1, size_t vl) { + return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vint8mf2_t op1, size_t vl) { + return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vint8m1_t op1, size_t vl) { + return __riscv_vsext_vf8_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext_vf2.c rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext.c rename to 
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext_vf2.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext_vf2.c @@ -60,87 +60,6 @@ return __riscv_vzext_vf2_tu(maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vzext.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_tu(maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) @@ -186,42 +105,6 @@ return __riscv_vzext_vf2_tu(maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tu(maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) @@ -312,87 +195,6 @@ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t 
test_vzext_vf4_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -438,42 +240,6 @@ return __riscv_vzext_vf2_tum(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: 
@test_vzext_vf4_u64m2_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tum( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tum( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) @@ -564,87 +330,6 @@ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -690,42 +375,6 @@ return __riscv_vzext_vf2_tumu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t mask, vuint64m4_t 
maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tumu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_tumu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) @@ -816,87 +465,6 @@ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { - return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_mu( -// CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { - return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { - return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { - return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) @@ -942,42 +510,6 @@ return __riscv_vzext_vf2_mu(mask, maskedoff, op1, vl); } -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); -} - -// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_mu( -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { - return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); -} - // CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_mu( // CHECK-RV64-NEXT: entry: // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext_vf4.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext_vf4.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext_vf4.c @@ -0,0 +1,332 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_tu(vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { + return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { + return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { + return __riscv_vzext_vf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { + return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, 
vuint16mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { + return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { + return __riscv_vzext_vf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { + return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) 
{ + return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { + return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { + return __riscv_vzext_vf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vuint8m2_t op1, size_t vl) { + return __riscv_vzext_vf4_mu(mask, maskedoff, 
op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint16mf4_t op1, size_t vl) { + return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint16mf2_t op1, size_t vl) { + return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint16m1_t op1, size_t vl) { + return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint16m2_t op1, size_t vl) { + return __riscv_vzext_vf4_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext_vf8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext_vf8.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vzext_vf8.c @@ -0,0 +1,152 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf8_tu(maskedoff, op1, vl); 
+} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tum( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_tumu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_tumu( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vuint8mf8_t op1, size_t vl) { + return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vuint8mf4_t op1, size_t vl) { + return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vuint8mf2_t op1, size_t vl) { + return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_mu( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vuint8m1_t op1, size_t vl) { + return __riscv_vzext_vf8_mu(mask, maskedoff, op1, vl); +} +