diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -192,6 +192,9 @@
 //  undisturbed.
   Policy NoMaskPolicy = NonePolicy;
 
+  // Emit constrained intrinsic based on IsFPConstrained().
+  bit SupportsStrictFP = false;
+
   // This builtin supports non-masked function overloading api.
   // All masked operations support overloading api.
   bit HasNoMaskedOverloaded = true;
@@ -1913,33 +1916,45 @@
 
 // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
 let NoMaskPolicy = HasPassthruOperand in {
-def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
-def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
+let SupportsStrictFP = true in {
+  def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
+  def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
+}
 def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
 def vfcvt_rtz_x_f_v : RVVConvToSignedBuiltin<"vfcvt_rtz_x">;
-def vfcvt_f_xu_v : RVVConvBuiltin<"Fv", "FvUv", "sil", "vfcvt_f">;
-def vfcvt_f_x_v : RVVConvBuiltin<"Fv", "Fvv", "sil", "vfcvt_f">;
+let SupportsStrictFP = true in {
+  def vfcvt_f_xu_v : RVVConvBuiltin<"Fv", "FvUv", "sil", "vfcvt_f">;
+  def vfcvt_f_x_v : RVVConvBuiltin<"Fv", "Fvv", "sil", "vfcvt_f">;
+}
 
 // 14.18. Widening Floating-Point/Integer Type-Convert Instructions
 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+let SupportsStrictFP = true in {
 def vfwcvt_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_xu">;
 def vfwcvt_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_x">;
+}
 def vfwcvt_rtz_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_rtz_xu">;
 def vfwcvt_rtz_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_rtz_x">;
+let SupportsStrictFP = true in {
 def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "csi", "vfwcvt_f">;
 def vfwcvt_f_x_v : RVVConvBuiltin<"Fw", "Fwv", "csi", "vfwcvt_f">;
 def vfwcvt_f_f_v : RVVConvBuiltin<"w", "wv", "xf", "vfwcvt_f">;
 }
+}
 
 // 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+let SupportsStrictFP = true in {
 def vfncvt_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_xu">;
 def vfncvt_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_x">;
+}
 def vfncvt_rtz_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_rtz_xu">;
 def vfncvt_rtz_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_rtz_x">;
+let SupportsStrictFP = true in {
 def vfncvt_f_xu_w : RVVConvBuiltin<"Fv", "FvUw", "csi", "vfncvt_f">;
 def vfncvt_f_x_w : RVVConvBuiltin<"Fv", "Fvw", "csi", "vfncvt_f">;
 def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_f">;
+}
 def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
 }
 }
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfcvt.c
@@ -2,6 +2,8 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \
 // RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v -fexperimental-strict-floating-point -frounding-math \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64-FENV %s
 
 #include <riscv_vector.h>
 
@@ -10,6 +12,11 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
+// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32mf2(
+// CHECK-RV64-FENV-NEXT:  entry:
+// CHECK-RV64-FENV-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]]
+// CHECK-RV64-FENV-NEXT:    ret [[TMP0]]
+//
 vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
@@ -19,6 +26,11 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
+// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
+// CHECK-RV64-FENV-NEXT:  entry:
+// CHECK-RV64-FENV-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-FENV-NEXT:    ret [[TMP0]]
+//
 vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
   return vfcvt_rtz_x(src, vl);
 }
@@ -28,6 +40,11 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
+// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m1(
+// CHECK-RV64-FENV-NEXT:  entry:
+// CHECK-RV64-FENV-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]]
+// CHECK-RV64-FENV-NEXT:    ret [[TMP0]]
+//
 vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
   return vfcvt_x(src, vl);
 }
@@ -37,6 +54,11 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret [[TMP0]]
 //
+// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
+// CHECK-RV64-FENV-NEXT:  entry:
+// CHECK-RV64-FENV-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 
[[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { return vfcvt_rtz_x(src, vl); } @@ -46,6 +68,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { return vfcvt_x(src, vl); } @@ -55,6 +82,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { return vfcvt_rtz_x(src, vl); } @@ -64,6 +96,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { return vfcvt_x(src, vl); } @@ -73,6 +110,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { return vfcvt_rtz_x(src, vl); } @@ -82,6 +124,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { return vfcvt_x(src, vl); } @@ -91,6 +138,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { return vfcvt_rtz_x(src, vl); } @@ -100,6 +152,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { return vfcvt_xu(src, vl); } @@ -109,6 +166,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { return vfcvt_rtz_xu(src, vl); } @@ -118,6 +180,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { return vfcvt_xu(src, vl); } @@ -127,6 +194,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { return vfcvt_rtz_xu(src, vl); } @@ -136,6 +208,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { return vfcvt_xu(src, vl); } @@ -145,6 +222,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { return vfcvt_rtz_xu(src, vl); } @@ -154,6 +236,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m4( +// CHECK-RV64-FENV-NEXT: entry: +// 
CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { return vfcvt_xu(src, vl); } @@ -163,6 +250,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { return vfcvt_rtz_xu(src, vl); } @@ -172,6 +264,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { return vfcvt_xu(src, vl); } @@ -181,6 +278,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { return vfcvt_rtz_xu(src, vl); } @@ -190,6 +292,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -199,6 +306,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -208,6 +320,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, 
size_t vl) { return vfcvt_f(src, vl); } @@ -217,6 +334,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -226,6 +348,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -235,6 +362,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -244,6 +376,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -253,6 +390,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -262,6 +404,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -271,6 +418,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: 
@test_vfcvt_f_xu_v_f32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -280,6 +432,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { return vfcvt_x(src, vl); } @@ -289,6 +446,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { return vfcvt_rtz_x(src, vl); } @@ -298,6 +460,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { return vfcvt_x(src, vl); } @@ -307,6 +474,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { return vfcvt_rtz_x(src, vl); } @@ -316,6 +488,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { return vfcvt_x(src, vl); } @@ -325,6 +502,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t 
test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { return vfcvt_rtz_x(src, vl); } @@ -334,6 +516,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { return vfcvt_x(src, vl); } @@ -343,6 +530,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { return vfcvt_rtz_x(src, vl); } @@ -352,6 +544,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { return vfcvt_xu(src, vl); } @@ -361,6 +558,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { return vfcvt_rtz_xu(src, vl); } @@ -370,6 +572,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { return vfcvt_xu(src, vl); } @@ -379,6 +586,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { return vfcvt_rtz_xu(src, vl); } @@ -388,6 +600,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { return vfcvt_xu(src, vl); } @@ -397,6 +614,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { return vfcvt_rtz_xu(src, vl); } @@ -406,6 +628,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { return vfcvt_xu(src, vl); } @@ -415,6 +642,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { return vfcvt_rtz_xu(src, vl); } @@ -424,6 +656,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -433,6 +670,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -442,6 +684,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 
[[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -451,6 +698,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -460,6 +712,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -469,6 +726,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -478,6 +740,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -487,6 +754,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) { return vfcvt_f(src, vl); } @@ -496,6 +768,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfcvt_x(mask, 
maskedoff, src, vl); @@ -506,6 +783,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfcvt_rtz_x(mask, maskedoff, src, vl); @@ -516,6 +798,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { return vfcvt_x(mask, maskedoff, src, vl); @@ -526,6 +813,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { return vfcvt_rtz_x(mask, maskedoff, src, vl); @@ -536,6 +828,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { return vfcvt_x(mask, maskedoff, src, vl); @@ -546,6 +843,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { return vfcvt_rtz_x(mask, maskedoff, src, vl); @@ -556,6 +858,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { return vfcvt_x(mask, maskedoff, src, vl); @@ -566,6 +873,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { return vfcvt_rtz_x(mask, maskedoff, src, vl); @@ -576,6 +888,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { return vfcvt_x(mask, maskedoff, src, vl); @@ -586,6 +903,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { return vfcvt_rtz_x(mask, maskedoff, src, vl); @@ -596,6 +918,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfcvt_xu(mask, maskedoff, src, vl); @@ -606,6 +933,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { @@ -617,6 +949,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { return vfcvt_xu(mask, maskedoff, src, vl); @@ -627,6 +964,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { return vfcvt_rtz_xu(mask, maskedoff, src, vl); @@ -637,6 +979,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { return vfcvt_xu(mask, maskedoff, src, vl); @@ -647,6 +994,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { return vfcvt_rtz_xu(mask, maskedoff, src, vl); @@ -657,6 +1009,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// 
CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { return vfcvt_xu(mask, maskedoff, src, vl); @@ -667,6 +1024,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { return vfcvt_rtz_xu(mask, maskedoff, src, vl); @@ -677,6 +1039,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { return vfcvt_xu(mask, maskedoff, src, vl); @@ -687,6 +1054,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { return vfcvt_rtz_xu(mask, maskedoff, src, vl); @@ -697,6 +1069,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -707,6 +1084,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -717,6 +1099,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -727,6 +1114,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -737,6 +1129,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -747,6 +1144,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { @@ -758,6 +1160,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, 
vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -768,6 +1175,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -778,6 +1190,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -788,6 +1205,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -798,6 +1220,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { return vfcvt_x(mask, maskedoff, src, vl); @@ -808,6 +1235,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { return vfcvt_rtz_x(mask, maskedoff, src, vl); @@ -818,6 +1250,11 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { return vfcvt_x(mask, maskedoff, src, vl); @@ -828,6 +1265,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { return vfcvt_rtz_x(mask, maskedoff, src, vl); @@ -838,6 +1280,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { return vfcvt_x(mask, maskedoff, src, vl); @@ -848,6 +1295,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { return vfcvt_rtz_x(mask, maskedoff, src, vl); @@ -858,6 +1310,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { return vfcvt_x(mask, maskedoff, src, vl); @@ -868,6 +1325,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { return vfcvt_rtz_x(mask, maskedoff, src, vl); @@ -878,6 +1340,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { return vfcvt_xu(mask, maskedoff, src, vl); @@ -888,6 +1355,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { return vfcvt_rtz_xu(mask, maskedoff, src, vl); @@ -898,6 +1370,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { return vfcvt_xu(mask, maskedoff, src, vl); @@ -908,6 +1385,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { return vfcvt_rtz_xu(mask, maskedoff, src, vl); @@ -918,6 +1400,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: 
@test_vfcvt_xu_f_v_u64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { return vfcvt_xu(mask, maskedoff, src, vl); @@ -928,6 +1415,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { return vfcvt_rtz_xu(mask, maskedoff, src, vl); @@ -938,6 +1430,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { return vfcvt_xu(mask, maskedoff, src, vl); @@ -948,6 +1445,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { return vfcvt_rtz_xu(mask, maskedoff, src, vl); @@ -958,6 +1460,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -968,6 +1475,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -978,6 +1490,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -988,6 +1505,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -998,6 +1520,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -1008,6 +1535,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -1018,6 +1550,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// 
CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); @@ -1028,6 +1565,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { return vfcvt_f(mask, maskedoff, src, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfncvt.c @@ -2,6 +2,8 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v -fexperimental-strict-floating-point -frounding-math \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64-FENV %s #include @@ -10,6 +12,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { return vfncvt_x(src, vl); } @@ -19,6 +26,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { return vfncvt_rtz_x(src, vl); } @@ -28,6 +40,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { return vfncvt_x(src, vl); } @@ -37,6 +54,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: 
@test_vfncvt_rtz_x_f_w_i16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { return vfncvt_rtz_x(src, vl); } @@ -46,6 +68,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { return vfncvt_x(src, vl); } @@ -55,6 +82,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { return vfncvt_rtz_x(src, vl); } @@ -64,6 +96,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { return vfncvt_x(src, vl); } @@ -73,6 +110,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { return vfncvt_rtz_x(src, vl); } @@ -82,6 +124,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { return vfncvt_x(src, vl); } @@ -91,6 +138,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// 
CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { return vfncvt_rtz_x(src, vl); } @@ -100,6 +152,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { return vfncvt_xu(src, vl); } @@ -109,6 +166,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { return vfncvt_rtz_xu(src, vl); } @@ -118,6 +180,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { return vfncvt_xu(src, vl); } @@ -127,6 +194,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { return vfncvt_rtz_xu(src, vl); } @@ -136,6 +208,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { return vfncvt_xu(src, vl); } @@ -145,6 +222,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { return vfncvt_rtz_xu(src, vl); } @@ -154,6 +236,11 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { return vfncvt_xu(src, vl); } @@ -163,6 +250,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { return vfncvt_rtz_xu(src, vl); } @@ -172,6 +264,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { return vfncvt_xu(src, vl); } @@ -181,6 +278,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { return vfncvt_rtz_xu(src, vl); } @@ -190,6 +292,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_x(src, vl); } @@ -199,6 +306,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_rtz_x(src, vl); } @@ -208,6 +320,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: 
@test_vfncvt_x_f_w_i32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { return vfncvt_x(src, vl); } @@ -217,6 +334,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { return vfncvt_rtz_x(src, vl); } @@ -226,6 +348,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { return vfncvt_x(src, vl); } @@ -235,6 +362,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { return vfncvt_rtz_x(src, vl); } @@ -244,6 +376,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { return vfncvt_x(src, vl); } @@ -253,6 +390,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { return vfncvt_rtz_x(src, vl); } @@ -262,6 +404,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// 
CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_xu(src, vl); } @@ -271,6 +418,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_rtz_xu(src, vl); } @@ -280,6 +432,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { return vfncvt_xu(src, vl); } @@ -289,6 +446,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { return vfncvt_rtz_xu(src, vl); } @@ -298,6 +460,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { return vfncvt_xu(src, vl); } @@ -307,6 +474,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { return vfncvt_rtz_xu(src, vl); } @@ -316,6 +488,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { return vfncvt_xu(src, vl); } @@ -325,6 +502,11 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { return vfncvt_rtz_xu(src, vl); } @@ -334,6 +516,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -343,6 +530,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -352,6 +544,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -361,6 +558,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -370,6 +572,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -379,6 +586,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// 
CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -388,6 +600,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -397,6 +614,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -406,6 +628,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -415,6 +642,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_rod_f(src, vl); } @@ -424,6 +656,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -433,6 +670,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t 
test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { return vfncvt_rod_f(src, vl); } @@ -442,6 +684,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -451,6 +698,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { return vfncvt_rod_f(src, vl); } @@ -460,6 +712,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { return vfncvt_f(src, vl); } @@ -469,6 +726,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { return vfncvt_rod_f(src, vl); } @@ -478,6 +740,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfncvt_x(mask, maskedoff, src, vl); @@ -488,6 +755,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t 
test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { @@ -499,6 +771,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfncvt_x(mask, maskedoff, src, vl); @@ -509,6 +786,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { @@ -520,6 +802,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { return vfncvt_x(mask, maskedoff, src, vl); @@ -530,6 +817,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { return vfncvt_rtz_x(mask, maskedoff, src, vl); @@ -540,6 +832,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { return vfncvt_x(mask, maskedoff, src, vl); @@ -550,6 +847,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { return vfncvt_rtz_x(mask, maskedoff, src, vl); @@ -560,6 +862,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { return vfncvt_x(mask, maskedoff, src, vl); @@ -570,6 +877,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { return vfncvt_rtz_x(mask, maskedoff, src, vl); @@ -580,6 +892,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfncvt_xu(mask, maskedoff, src, vl); @@ -590,6 +907,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { @@ -601,6 +923,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfncvt_xu(mask, maskedoff, src, vl); @@ -611,6 +938,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { @@ -622,6 +954,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { return vfncvt_xu(mask, maskedoff, src, vl); @@ -632,6 +969,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { @@ -643,6 +985,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { return vfncvt_xu(mask, maskedoff, src, vl); @@ -653,6 +1000,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { return vfncvt_rtz_xu(mask, maskedoff, src, vl); @@ -663,6 +1015,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { return vfncvt_xu(mask, maskedoff, src, vl); @@ -673,6 +1030,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { return vfncvt_rtz_xu(mask, maskedoff, src, vl); @@ -683,6 +1045,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { return vfncvt_x(mask, maskedoff, src, vl); @@ -693,6 +1060,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { @@ -704,6 +1076,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) 
#[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_x(mask, maskedoff, src, vl); @@ -714,6 +1091,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_rtz_x(mask, maskedoff, src, vl); @@ -724,6 +1106,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_x(mask, maskedoff, src, vl); @@ -734,6 +1121,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_rtz_x(mask, maskedoff, src, vl); @@ -744,6 +1136,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_x(mask, maskedoff, src, vl); @@ -754,6 +1151,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, 
vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_rtz_x(mask, maskedoff, src, vl); @@ -764,6 +1166,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { return vfncvt_xu(mask, maskedoff, src, vl); @@ -774,6 +1181,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { @@ -785,6 +1197,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_xu(mask, maskedoff, src, vl); @@ -795,6 +1212,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { @@ -806,6 +1228,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_xu(mask, maskedoff, src, vl); @@ -816,6 +1243,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { @@ -827,6 +1259,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_xu(mask, maskedoff, src, vl); @@ -837,6 +1274,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_rtz_xu(mask, maskedoff, src, vl); @@ -847,6 +1289,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { @@ -858,6 +1305,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { return vfncvt_f(mask, maskedoff, src, vl); @@ -868,6 +1320,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: 
@test_vfncvt_f_x_w_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { return vfncvt_f(mask, maskedoff, src, vl); @@ -878,6 +1335,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { return vfncvt_f(mask, maskedoff, src, vl); @@ -888,6 +1350,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { @@ -899,6 +1366,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { return vfncvt_f(mask, maskedoff, src, vl); @@ -909,6 +1381,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { return vfncvt_f(mask, maskedoff, src, vl); @@ -919,6 +1396,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { return vfncvt_f(mask, maskedoff, src, vl); @@ -929,6 +1411,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { @@ -940,6 +1427,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { @@ -951,6 +1443,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_f(mask, maskedoff, src, vl); @@ -961,6 +1458,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { @@ -972,6 +1474,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t 
test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_f(mask, maskedoff, src, vl); @@ -982,6 +1489,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { @@ -993,6 +1505,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_f(mask, maskedoff, src, vl); @@ -1003,6 +1520,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwcvt.c @@ -2,6 +2,8 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +v -fexperimental-strict-floating-point -frounding-math \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64-FENV %s #include <riscv_vector.h> @@ -10,6 +12,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7:[0-9]+]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -19,6 +26,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -28,6 +40,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -37,6 +54,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -46,6 +68,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -55,6 +82,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -64,6 +96,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -73,6 +110,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -82,6 +124,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -91,6 +138,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -100,6 +152,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { return vfwcvt_x(src, vl); } @@ -109,6 +166,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) { return vfwcvt_rtz_x(src, vl); } @@ -118,6 +180,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { return vfwcvt_x(src, vl); } @@ -127,6 +194,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { return 
vfwcvt_rtz_x(src, vl); } @@ -136,6 +208,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { return vfwcvt_x(src, vl); } @@ -145,6 +222,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { return vfwcvt_rtz_x(src, vl); } @@ -154,6 +236,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { return vfwcvt_x(src, vl); } @@ -163,6 +250,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { return vfwcvt_rtz_x(src, vl); } @@ -172,6 +264,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { return vfwcvt_xu(src, vl); } @@ -181,6 +278,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { return vfwcvt_rtz_xu(src, vl); } @@ -190,6 +292,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// 
CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { return vfwcvt_xu(src, vl); } @@ -199,6 +306,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { return vfwcvt_rtz_xu(src, vl); } @@ -208,6 +320,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { return vfwcvt_xu(src, vl); } @@ -217,6 +334,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { return vfwcvt_rtz_xu(src, vl); } @@ -226,6 +348,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { return vfwcvt_xu(src, vl); } @@ -235,6 +362,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { return vfwcvt_rtz_xu(src, vl); } @@ -244,6 +376,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( undef, 
[[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -253,6 +390,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -262,6 +404,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -271,6 +418,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -280,6 +432,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -289,6 +446,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -298,6 +460,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -307,6 +474,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -316,6 +488,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -325,6 +502,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -334,6 +516,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -343,6 +530,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { return vfwcvt_f(src, vl); } @@ -352,6 +544,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { @@ -363,6 +560,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // 
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -373,6 +575,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -383,6 +590,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -393,6 +605,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -403,6 +620,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { @@ -414,6 +636,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -424,6 +651,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -434,6 +666,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -444,6 +681,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -454,6 +696,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfwcvt_x(mask, maskedoff, src, vl); @@ -464,6 +711,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfwcvt_rtz_x(mask, maskedoff, src, vl); @@ -474,6 +726,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfwcvt_x(mask, maskedoff, src, vl); @@ -484,6 +741,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfwcvt_rtz_x(mask, maskedoff, src, vl); @@ -494,6 +756,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { return vfwcvt_x(mask, maskedoff, src, vl); @@ -504,6 +771,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { return vfwcvt_rtz_x(mask, maskedoff, src, vl); @@ -514,6 +786,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t 
test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { return vfwcvt_x(mask, maskedoff, src, vl); @@ -524,6 +801,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { return vfwcvt_rtz_x(mask, maskedoff, src, vl); @@ -534,6 +816,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfwcvt_xu(mask, maskedoff, src, vl); @@ -544,6 +831,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { @@ -555,6 +847,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfwcvt_xu(mask, maskedoff, src, vl); @@ -565,6 +862,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { @@ -576,6 +878,11 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { return vfwcvt_xu(mask, maskedoff, src, vl); @@ -586,6 +893,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { @@ -597,6 +909,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { return vfwcvt_xu(mask, maskedoff, src, vl); @@ -607,6 +924,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { return vfwcvt_rtz_xu(mask, maskedoff, src, vl); @@ -617,6 +939,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -627,6 +954,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -637,6 +969,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -647,6 +984,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -657,6 +999,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -667,6 +1014,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -677,6 +1029,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// 
CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -687,6 +1044,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -697,6 +1059,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -707,6 +1074,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -717,6 +1089,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); @@ -727,6 +1104,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR7]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { return vfwcvt_f(mask, maskedoff, src, vl); diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfcvt.c @@ -3,6 +3,9 @@ // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ // RUN: -target-feature +v -target-feature +zfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -fexperimental-strict-floating-point -frounding-math \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64-FENV %s #include @@ -11,6 +14,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { return vfcvt_x_f_v_i32mf2(src, vl); } @@ -20,6 +28,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i32mf2(src, vl); } @@ -29,6 +42,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { return vfcvt_x_f_v_i32m1(src, vl); } @@ -38,6 +56,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) { return vfcvt_rtz_x_f_v_i32m1(src, vl); } @@ -47,6 +70,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m2( +// CHECK-RV64-FENV-NEXT: entry: +// 
CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { return vfcvt_x_f_v_i32m2(src, vl); } @@ -56,6 +84,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i32m2(src, vl); } @@ -65,6 +98,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { return vfcvt_x_f_v_i32m4(src, vl); } @@ -74,6 +112,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i32m4(src, vl); } @@ -83,6 +126,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { return vfcvt_x_f_v_i32m8(src, vl); } @@ -92,6 +140,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) { return vfcvt_rtz_x_f_v_i32m8(src, vl); } @@ -101,6 +154,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// 
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { return vfcvt_xu_f_v_u32mf2(src, vl); } @@ -110,6 +168,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u32mf2(src, vl); } @@ -119,6 +182,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { return vfcvt_xu_f_v_u32m1(src, vl); } @@ -128,6 +196,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u32m1(src, vl); } @@ -137,6 +210,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { return vfcvt_xu_f_v_u32m2(src, vl); } @@ -146,6 +224,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u32m2(src, vl); } @@ -155,6 +238,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { return vfcvt_xu_f_v_u32m4(src, vl); } @@ -164,6 +252,11 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u32m4(src, vl); } @@ -173,6 +266,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { return vfcvt_xu_f_v_u32m8(src, vl); } @@ -182,6 +280,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u32m8(src, vl); } @@ -191,6 +294,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) { return vfcvt_f_x_v_f32mf2(src, vl); } @@ -200,6 +308,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) { return vfcvt_f_x_v_f32m1(src, vl); } @@ -209,6 +322,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) { return vfcvt_f_x_v_f32m2(src, vl); } @@ -218,6 +336,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: 
@test_vfcvt_f_x_v_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) { return vfcvt_f_x_v_f32m4(src, vl); } @@ -227,6 +350,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) { return vfcvt_f_x_v_f32m8(src, vl); } @@ -236,6 +364,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) { return vfcvt_f_xu_v_f32mf2(src, vl); } @@ -245,6 +378,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) { return vfcvt_f_xu_v_f32m1(src, vl); } @@ -254,6 +392,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) { return vfcvt_f_xu_v_f32m2(src, vl); } @@ -263,6 +406,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) { return vfcvt_f_xu_v_f32m4(src, vl); } @@ -272,6 +420,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) 
#[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) { return vfcvt_f_xu_v_f32m8(src, vl); } @@ -281,6 +434,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { return vfcvt_x_f_v_i64m1(src, vl); } @@ -290,6 +448,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) { return vfcvt_rtz_x_f_v_i64m1(src, vl); } @@ -299,6 +462,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { return vfcvt_x_f_v_i64m2(src, vl); } @@ -308,6 +476,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i64m2(src, vl); } @@ -317,6 +490,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { return vfcvt_x_f_v_i64m4(src, vl); } @@ -326,6 +504,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i64m4(src, vl); } @@ -335,6 +518,11 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { return vfcvt_x_f_v_i64m8(src, vl); } @@ -344,6 +532,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) { return vfcvt_rtz_x_f_v_i64m8(src, vl); } @@ -353,6 +546,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { return vfcvt_xu_f_v_u64m1(src, vl); } @@ -362,6 +560,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u64m1(src, vl); } @@ -371,6 +574,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { return vfcvt_xu_f_v_u64m2(src, vl); } @@ -380,6 +588,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u64m2(src, vl); } @@ -389,6 +602,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// 
CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { return vfcvt_xu_f_v_u64m4(src, vl); } @@ -398,6 +616,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u64m4(src, vl); } @@ -407,6 +630,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { return vfcvt_xu_f_v_u64m8(src, vl); } @@ -416,6 +644,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u64m8(src, vl); } @@ -425,6 +658,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) { return vfcvt_f_x_v_f64m1(src, vl); } @@ -434,6 +672,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) { return vfcvt_f_x_v_f64m2(src, vl); } @@ -443,6 +686,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f64.nxv4i64.i64( undef, 
[[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) { return vfcvt_f_x_v_f64m4(src, vl); } @@ -452,6 +700,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) { return vfcvt_f_x_v_f64m8(src, vl); } @@ -461,6 +714,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) { return vfcvt_f_xu_v_f64m1(src, vl); } @@ -470,6 +728,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) { return vfcvt_f_xu_v_f64m2(src, vl); } @@ -479,6 +742,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) { return vfcvt_f_xu_v_f64m4(src, vl); } @@ -488,6 +756,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) { return vfcvt_f_xu_v_f64m8(src, vl); } @@ -497,6 +770,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfcvt_x_f_v_i32mf2_m(vbool64_t 
mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl); @@ -507,6 +785,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl); @@ -517,6 +800,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { return vfcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl); @@ -527,6 +815,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { return vfcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl); @@ -537,6 +830,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { return vfcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl); @@ -547,6 +845,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { return 
vfcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl); @@ -557,6 +860,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { return vfcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl); @@ -567,6 +875,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl); @@ -577,6 +890,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { return vfcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl); @@ -587,6 +905,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { return vfcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl); @@ -597,6 +920,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); @@ -607,6 +935,11 @@ 
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) { @@ -618,6 +951,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { return vfcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl); @@ -628,6 +966,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl); @@ -638,6 +981,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { return vfcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl); @@ -648,6 +996,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl); @@ -658,6 +1011,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { return vfcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl); @@ -668,6 +1026,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl); @@ -678,6 +1041,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { return vfcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl); @@ -688,6 +1056,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl); @@ -698,6 +1071,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint32mf2_t src, size_t vl) { return vfcvt_f_x_v_f32mf2_m(mask, maskedoff, src, vl); @@ -708,6 +1086,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { return vfcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl); @@ -718,6 +1101,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { return vfcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl); @@ -728,6 +1116,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { return vfcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl); @@ -738,6 +1131,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { return vfcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl); @@ -748,6 +1146,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint32mf2_t src, size_t vl) { @@ -759,6 +1162,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m1_m( +// 
CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { return vfcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl); @@ -769,6 +1177,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { return vfcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl); @@ -779,6 +1192,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { return vfcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl); @@ -789,6 +1207,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { return vfcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl); @@ -799,6 +1222,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { return vfcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl); @@ -809,6 +1237,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { return vfcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl); @@ -819,6 +1252,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { return vfcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl); @@ -829,6 +1267,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl); @@ -839,6 +1282,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { return vfcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl); @@ -849,6 +1297,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl); @@ -859,6 +1312,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { return vfcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl); @@ -869,6 +1327,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { return vfcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl); @@ -879,6 +1342,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { return vfcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl); @@ -889,6 +1357,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u64m1_m(mask, maskedoff, src, vl); @@ -899,6 +1372,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { return vfcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl); @@ -909,6 +1387,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u64m2_m(mask, maskedoff, src, vl); @@ -919,6 +1402,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { return vfcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl); @@ -929,6 +1417,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u64m4_m(mask, maskedoff, src, vl); @@ -939,6 +1432,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { return vfcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl); @@ -949,6 +1447,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl); @@ -959,6 +1462,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// 
CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { return vfcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl); @@ -969,6 +1477,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { return vfcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl); @@ -979,6 +1492,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { return vfcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl); @@ -989,6 +1507,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { return vfcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl); @@ -999,6 +1522,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { return vfcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl); @@ -1009,6 +1537,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t 
mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { return vfcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl); @@ -1019,6 +1552,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { return vfcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl); @@ -1029,6 +1567,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { return vfcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl); @@ -1039,6 +1582,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfcvt_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) { return vfcvt_x_f_v_i16mf4(src, vl); } @@ -1048,6 +1596,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16mf4(src, vl); } @@ -1057,6 +1610,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfcvt_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) { return vfcvt_x_f_v_i16mf2(src, vl); } @@ -1066,6 +1624,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16mf2(src, vl); } @@ -1075,6 +1638,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfcvt_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) { return vfcvt_x_f_v_i16m1(src, vl); } @@ -1084,6 +1652,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfcvt_rtz_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m1(src, vl); } @@ -1093,6 +1666,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfcvt_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) { return vfcvt_x_f_v_i16m2(src, vl); } @@ -1102,6 +1680,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfcvt_rtz_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m2(src, vl); } @@ -1111,6 +1694,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfcvt_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) { return vfcvt_x_f_v_i16m4(src, vl); } @@ -1120,6 +1708,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t 
test_vfcvt_rtz_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m4(src, vl); } @@ -1129,6 +1722,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m8_t test_vfcvt_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) { return vfcvt_x_f_v_i16m8(src, vl); } @@ -1138,6 +1736,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m8_t test_vfcvt_rtz_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m8(src, vl); } @@ -1147,6 +1750,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfcvt_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) { return vfcvt_xu_f_v_u16mf4(src, vl); } @@ -1156,6 +1764,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16mf4(src, vl); } @@ -1165,6 +1778,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfcvt_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) { return vfcvt_xu_f_v_u16mf2(src, vl); } @@ -1174,6 +1792,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16mf2(src, vl); } @@ -1183,6 +1806,11 @@ // 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfcvt_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) { return vfcvt_xu_f_v_u16m1(src, vl); } @@ -1192,6 +1820,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m1(src, vl); } @@ -1201,6 +1834,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfcvt_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) { return vfcvt_xu_f_v_u16m2(src, vl); } @@ -1210,6 +1848,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m2(src, vl); } @@ -1219,6 +1862,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfcvt_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) { return vfcvt_xu_f_v_u16m4(src, vl); } @@ -1228,6 +1876,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m4(src, vl); } @@ -1237,6 +1890,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m8_t test_vfcvt_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) { return vfcvt_xu_f_v_u16m8(src, vl); } @@ -1246,6 +1904,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m8(src, vl); } @@ -1255,6 +1918,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f16.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfcvt_f_x_v_f16mf4 (vint16mf4_t src, size_t vl) { return vfcvt_f_x_v_f16mf4(src, vl); } @@ -1264,6 +1932,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f16.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfcvt_f_x_v_f16mf2 (vint16mf2_t src, size_t vl) { return vfcvt_f_x_v_f16mf2(src, vl); } @@ -1273,6 +1946,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f16.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfcvt_f_x_v_f16m1 (vint16m1_t src, size_t vl) { return vfcvt_f_x_v_f16m1(src, vl); } @@ -1282,6 +1960,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f16.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfcvt_f_x_v_f16m2 (vint16m2_t src, size_t vl) { return vfcvt_f_x_v_f16m2(src, vl); } @@ -1291,6 +1974,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.strict.vfcvt.f.x.v.nxv16f16.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfcvt_f_x_v_f16m4 (vint16m4_t src, size_t vl) { return vfcvt_f_x_v_f16m4(src, vl); } @@ -1300,6 +1988,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.nxv32f16.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m8_t test_vfcvt_f_x_v_f16m8 (vint16m8_t src, size_t vl) { return vfcvt_f_x_v_f16m8(src, vl); } @@ -1309,6 +2002,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4 (vuint16mf4_t src, size_t vl) { return vfcvt_f_xu_v_f16mf4(src, vl); } @@ -1318,6 +2016,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2 (vuint16mf2_t src, size_t vl) { return vfcvt_f_xu_v_f16mf2(src, vl); } @@ -1327,6 +2030,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfcvt_f_xu_v_f16m1 (vuint16m1_t src, size_t vl) { return vfcvt_f_xu_v_f16m1(src, vl); } @@ -1336,6 +2044,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfcvt_f_xu_v_f16m2 (vuint16m2_t src, size_t vl) { return vfcvt_f_xu_v_f16m2(src, vl); } @@ -1345,6 +2058,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t 
test_vfcvt_f_xu_v_f16m4 (vuint16m4_t src, size_t vl) { return vfcvt_f_xu_v_f16m4(src, vl); } @@ -1354,6 +2072,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m8_t test_vfcvt_f_xu_v_f16m8 (vuint16m8_t src, size_t vl) { return vfcvt_f_xu_v_f16m8(src, vl); } @@ -1363,6 +2086,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfcvt_x_f_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfcvt_x_f_v_i16mf4_m(mask, maskedoff, src, vl); } @@ -1372,6 +2100,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16mf4_m(mask, maskedoff, src, vl); } @@ -1381,6 +2114,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfcvt_x_f_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfcvt_x_f_v_i16mf2_m(mask, maskedoff, src, vl); } @@ -1390,6 +2128,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16mf2_m(mask, maskedoff, src, vl); } @@ -1399,6 +2142,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfcvt_x_f_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { return vfcvt_x_f_v_i16m1_m(mask, maskedoff, src, vl); } @@ -1408,6 +2156,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m1_m(mask, maskedoff, src, vl); } @@ -1417,6 +2170,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfcvt_x_f_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { return vfcvt_x_f_v_i16m2_m(mask, maskedoff, src, vl); } @@ -1426,6 +2184,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m2_m(mask, maskedoff, src, vl); } @@ -1435,6 +2198,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfcvt_x_f_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { return vfcvt_x_f_v_i16m4_m(mask, maskedoff, src, vl); } @@ -1444,6 +2212,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m4_m(mask, maskedoff, src, vl); } @@ -1453,6 +2226,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_x_f_v_i16m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m8_t test_vfcvt_x_f_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { return vfcvt_x_f_v_i16m8_m(mask, maskedoff, src, vl); } @@ -1462,6 +2240,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_x_f_v_i16m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { return vfcvt_rtz_x_f_v_i16m8_m(mask, maskedoff, src, vl); } @@ -1471,6 +2254,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfcvt_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); } @@ -1480,6 +2268,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv1i16.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16mf4_m(mask, maskedoff, src, vl); } @@ -1489,6 +2282,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfcvt_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); } @@ -1498,6 +2296,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv2i16.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16mf2_m(mask, maskedoff, src, vl); } @@ -1507,6 +2310,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfcvt_xu_f_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { return vfcvt_xu_f_v_u16m1_m(mask, maskedoff, src, vl); } @@ -1516,6 +2324,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m1_m(mask, maskedoff, src, vl); } @@ -1525,6 +2338,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfcvt_xu_f_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { return vfcvt_xu_f_v_u16m2_m(mask, maskedoff, src, vl); } @@ -1534,6 +2352,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m2_m(mask, maskedoff, src, vl); } @@ -1543,6 +2366,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfcvt_xu_f_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { return vfcvt_xu_f_v_u16m4_m(mask, maskedoff, src, vl); } @@ -1552,6 +2380,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m4_m(mask, maskedoff, src, vl); } @@ -1561,6 +2394,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_xu_f_v_u16m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m8_t test_vfcvt_xu_f_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { return vfcvt_xu_f_v_u16m8_m(mask, maskedoff, src, vl); } @@ -1570,6 +2408,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.rtz.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { return vfcvt_rtz_xu_f_v_u16m8_m(mask, maskedoff, src, vl); } @@ -1579,6 +2422,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint16mf4_t src, size_t vl) { return vfcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl); } @@ -1588,6 +2436,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint16mf2_t src, size_t vl) { return vfcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl); } @@ -1597,6 +2450,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfcvt_f_x_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { return vfcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); } @@ -1606,6 +2464,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfcvt_f_x_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { return vfcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); } @@ -1615,6 +2478,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfcvt_f_x_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { return vfcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); } @@ -1624,6 +2492,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_x_v_f16m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m8_t test_vfcvt_f_x_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { return vfcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); } @@ -1633,6 +2506,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint16mf4_t src, size_t vl) { return vfcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl); } @@ -1642,6 +2520,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint16mf2_t src, size_t vl) { return vfcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl); } @@ -1651,6 +2534,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { return vfcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); } @@ -1660,6 +2548,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { return vfcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); } @@ -1669,6 +2562,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: 
@test_vfcvt_f_xu_v_f16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { return vfcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); } @@ -1678,6 +2576,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfcvt_f_xu_v_f16m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { return vfcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c --- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfncvt.c @@ -3,6 +3,9 @@ // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ // RUN: -target-feature +v -target-feature +zfh \ // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \ +// RUN: -target-feature +v -target-feature +zfh -fexperimental-strict-floating-point -frounding-math \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64-FENV %s #include @@ -11,6 +14,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { return vfncvt_x_f_w_i16mf4(src, vl); } @@ -20,6 +28,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i16mf4(src, vl); } @@ -29,6 +42,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t 
test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { return vfncvt_x_f_w_i16mf2(src, vl); } @@ -38,6 +56,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) { return vfncvt_rtz_x_f_w_i16mf2(src, vl); } @@ -47,6 +70,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { return vfncvt_x_f_w_i16m1(src, vl); } @@ -56,6 +84,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i16m1(src, vl); } @@ -65,6 +98,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { return vfncvt_x_f_w_i16m2(src, vl); } @@ -74,6 +112,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i16m2(src, vl); } @@ -83,6 +126,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { return vfncvt_x_f_w_i16m4(src, vl); } @@ -92,6 +140,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) { return vfncvt_rtz_x_f_w_i16m4(src, vl); } @@ -101,6 +154,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { return vfncvt_xu_f_w_u16mf4(src, vl); } @@ -110,6 +168,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u16mf4(src, vl); } @@ -119,6 +182,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { return vfncvt_xu_f_w_u16mf2(src, vl); } @@ -128,6 +196,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u16mf2(src, vl); } @@ -137,6 +210,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { return vfncvt_xu_f_w_u16m1(src, vl); } @@ -146,6 +224,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u16m1(src, vl); } @@ -155,6 +238,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { return vfncvt_xu_f_w_u16m2(src, vl); } @@ -164,6 +252,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u16m2(src, vl); } @@ -173,6 +266,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { return vfncvt_xu_f_w_u16m4(src, vl); } @@ -182,6 +280,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u16m4(src, vl); } @@ -191,6 +294,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_x_f_w_i32mf2(src, vl); } @@ -200,6 +308,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// 
CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_rtz_x_f_w_i32mf2(src, vl); } @@ -209,6 +322,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { return vfncvt_x_f_w_i32m1(src, vl); } @@ -218,6 +336,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i32m1(src, vl); } @@ -227,6 +350,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { return vfncvt_x_f_w_i32m2(src, vl); } @@ -236,6 +364,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i32m2(src, vl); } @@ -245,6 +378,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { return vfncvt_x_f_w_i32m4(src, vl); } @@ -254,6 +392,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: 
ret [[TMP0]] +// vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) { return vfncvt_rtz_x_f_w_i32m4(src, vl); } @@ -263,6 +406,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_xu_f_w_u32mf2(src, vl); } @@ -272,6 +420,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u32mf2(src, vl); } @@ -281,6 +434,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { return vfncvt_xu_f_w_u32m1(src, vl); } @@ -290,6 +448,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u32m1(src, vl); } @@ -299,6 +462,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { return vfncvt_xu_f_w_u32m2(src, vl); } @@ -308,6 +476,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u32m2(src, 
vl); } @@ -317,6 +490,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { return vfncvt_xu_f_w_u32m4(src, vl); } @@ -326,6 +504,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u32m4(src, vl); } @@ -335,6 +518,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) { return vfncvt_f_x_w_f32mf2(src, vl); } @@ -344,6 +532,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) { return vfncvt_f_x_w_f32m1(src, vl); } @@ -353,6 +546,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) { return vfncvt_f_x_w_f32m2(src, vl); } @@ -362,6 +560,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) { return vfncvt_f_x_w_f32m4(src, vl); } @@ -371,6 +574,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) { return vfncvt_f_xu_w_f32mf2(src, vl); } @@ -380,6 +588,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) { return vfncvt_f_xu_w_f32m1(src, vl); } @@ -389,6 +602,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) { return vfncvt_f_xu_w_f32m2(src, vl); } @@ -398,6 +616,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) { return vfncvt_f_xu_w_f32m4(src, vl); } @@ -407,6 +630,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_f_f_w_f32mf2(src, vl); } @@ -416,6 +644,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) { return vfncvt_rod_f_f_w_f32mf2(src, vl); } @@ -425,6 +658,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.strict.vfncvt.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { return vfncvt_f_f_w_f32m1(src, vl); } @@ -434,6 +672,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) { return vfncvt_rod_f_f_w_f32m1(src, vl); } @@ -443,6 +686,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { return vfncvt_f_f_w_f32m2(src, vl); } @@ -452,6 +700,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) { return vfncvt_rod_f_f_w_f32m2(src, vl); } @@ -461,6 +714,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { return vfncvt_f_f_w_f32m4(src, vl); } @@ -470,6 +728,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { return vfncvt_rod_f_f_w_f32m4(src, vl); } @@ -479,6 +742,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfncvt_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfncvt_x_f_w_i16mf4_m(mask, maskedoff, src, vl); @@ -489,6 +757,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { @@ -500,6 +773,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfncvt_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfncvt_x_f_w_i16mf2_m(mask, maskedoff, src, vl); @@ -510,6 +788,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { @@ -521,6 +804,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { return vfncvt_x_f_w_i16m1_m(mask, maskedoff, src, vl); @@ -531,6 +819,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, 
vfloat32m2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i16m1_m(mask, maskedoff, src, vl); @@ -541,6 +834,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { return vfncvt_x_f_w_i16m2_m(mask, maskedoff, src, vl); @@ -551,6 +849,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i16m2_m(mask, maskedoff, src, vl); @@ -561,6 +864,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { return vfncvt_x_f_w_i16m4_m(mask, maskedoff, src, vl); @@ -571,6 +879,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { return vfncvt_rtz_x_f_w_i16m4_m(mask, maskedoff, src, vl); @@ -581,6 +894,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { return 
vfncvt_xu_f_w_u16mf4_m(mask, maskedoff, src, vl); @@ -591,6 +909,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { @@ -602,6 +925,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfncvt_xu_f_w_u16mf2_m(mask, maskedoff, src, vl); @@ -612,6 +940,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { @@ -623,6 +956,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { return vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl); @@ -633,6 +971,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { @@ -644,6 +987,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { return vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl); @@ -654,6 +1002,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u16m2_m(mask, maskedoff, src, vl); @@ -664,6 +1017,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { return vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl); @@ -674,6 +1032,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u16m4_m(mask, maskedoff, src, vl); @@ -684,6 +1047,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfncvt_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { return vfncvt_x_f_w_i32mf2_m(mask, maskedoff, src, vl); @@ -694,6 +1062,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { @@ -705,6 +1078,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_x_f_w_i32m1_m(mask, maskedoff, src, vl); @@ -715,6 +1093,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i32m1_m(mask, maskedoff, src, vl); @@ -725,6 +1108,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_x_f_w_i32m2_m(mask, maskedoff, src, vl); @@ -735,6 +1123,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i32m2_m(mask, maskedoff, src, vl); @@ -745,6 +1138,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: 
@test_vfncvt_x_f_w_i32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_x_f_w_i32m4_m(mask, maskedoff, src, vl); @@ -755,6 +1153,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_rtz_x_f_w_i32m4_m(mask, maskedoff, src, vl); @@ -765,6 +1168,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { return vfncvt_xu_f_w_u32mf2_m(mask, maskedoff, src, vl); @@ -775,6 +1183,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { @@ -786,6 +1199,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_xu_f_w_u32m1_m(mask, maskedoff, src, vl); @@ -796,6 +1214,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// 
CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { @@ -807,6 +1230,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_xu_f_w_u32m2_m(mask, maskedoff, src, vl); @@ -817,6 +1245,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { @@ -828,6 +1261,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_xu_f_w_u32m4_m(mask, maskedoff, src, vl); @@ -838,6 +1276,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u32m4_m(mask, maskedoff, src, vl); @@ -848,6 +1291,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint64m1_t src, size_t vl) { @@ -859,6 +1307,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { return vfncvt_f_x_w_f32m1_m(mask, maskedoff, src, vl); @@ -869,6 +1322,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { return vfncvt_f_x_w_f32m2_m(mask, maskedoff, src, vl); @@ -879,6 +1337,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { return vfncvt_f_x_w_f32m4_m(mask, maskedoff, src, vl); @@ -889,6 +1352,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint64m1_t src, size_t vl) { @@ -900,6 +1368,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, 
vuint64m2_t src, size_t vl) { return vfncvt_f_xu_w_f32m1_m(mask, maskedoff, src, vl); @@ -910,6 +1383,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { return vfncvt_f_xu_w_f32m2_m(mask, maskedoff, src, vl); @@ -920,6 +1398,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { return vfncvt_f_xu_w_f32m4_m(mask, maskedoff, src, vl); @@ -930,6 +1413,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { @@ -941,6 +1429,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) { @@ -952,6 +1445,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { return vfncvt_f_f_w_f32m1_m(mask, maskedoff, src, vl); @@ -962,6 +1460,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { @@ -973,6 +1476,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { return vfncvt_f_f_w_f32m2_m(mask, maskedoff, src, vl); @@ -983,6 +1491,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { @@ -994,6 +1507,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { return vfncvt_f_f_w_f32m4_m(mask, maskedoff, src, vl); @@ -1004,6 +1522,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { @@ -1015,6 +1538,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8mf8( +// CHECK-RV64-FENV-NEXT: entry: 
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf8_t test_vfncvt_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) { return vfncvt_x_f_w_i8mf8(src, vl); } @@ -1024,6 +1552,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf8(src, vl); } @@ -1033,6 +1566,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf4_t test_vfncvt_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) { return vfncvt_x_f_w_i8mf4(src, vl); } @@ -1042,6 +1580,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf4(src, vl); } @@ -1051,6 +1594,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf2_t test_vfncvt_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) { return vfncvt_x_f_w_i8mf2(src, vl); } @@ -1060,6 +1608,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf2(src, vl); } @@ -1069,6 +1622,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m1_t test_vfncvt_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) { return vfncvt_x_f_w_i8m1(src, vl); } @@ -1078,6 +1636,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m1_t test_vfncvt_rtz_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m1(src, vl); } @@ -1087,6 +1650,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m2_t test_vfncvt_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) { return vfncvt_x_f_w_i8m2(src, vl); } @@ -1096,6 +1664,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m2_t test_vfncvt_rtz_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m2(src, vl); } @@ -1105,6 +1678,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m4_t test_vfncvt_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) { return vfncvt_x_f_w_i8m4(src, vl); } @@ -1114,6 +1692,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m4_t test_vfncvt_rtz_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m4(src, vl); } @@ -1123,6 +1706,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8mf8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf8_t test_vfncvt_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) { return vfncvt_xu_f_w_u8mf8(src, vl); } @@ -1132,6 
+1720,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf8(src, vl); } @@ -1141,6 +1734,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf4_t test_vfncvt_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) { return vfncvt_xu_f_w_u8mf4(src, vl); } @@ -1150,6 +1748,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf4(src, vl); } @@ -1159,6 +1762,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf2_t test_vfncvt_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) { return vfncvt_xu_f_w_u8mf2(src, vl); } @@ -1168,6 +1776,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf2(src, vl); } @@ -1177,6 +1790,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m1_t test_vfncvt_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) { return vfncvt_xu_f_w_u8m1(src, vl); } @@ -1186,6 +1804,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 
[[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m1(src, vl); } @@ -1195,6 +1818,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m2_t test_vfncvt_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) { return vfncvt_xu_f_w_u8m2(src, vl); } @@ -1204,6 +1832,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m2(src, vl); } @@ -1213,6 +1846,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m4_t test_vfncvt_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) { return vfncvt_xu_f_w_u8m4(src, vl); } @@ -1222,6 +1860,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m4(src, vl); } @@ -1231,6 +1874,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfncvt_f_x_w_f16mf4 (vint32mf2_t src, size_t vl) { return vfncvt_f_x_w_f16mf4(src, vl); } @@ -1240,6 +1888,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f16mf2( +// CHECK-RV64-FENV-NEXT: 
entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f16.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfncvt_f_x_w_f16mf2 (vint32m1_t src, size_t vl) { return vfncvt_f_x_w_f16mf2(src, vl); } @@ -1249,6 +1902,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f16.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfncvt_f_x_w_f16m1 (vint32m2_t src, size_t vl) { return vfncvt_f_x_w_f16m1(src, vl); } @@ -1258,6 +1916,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f16.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfncvt_f_x_w_f16m2 (vint32m4_t src, size_t vl) { return vfncvt_f_x_w_f16m2(src, vl); } @@ -1267,6 +1930,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.nxv16f16.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfncvt_f_x_w_f16m4 (vint32m8_t src, size_t vl) { return vfncvt_f_x_w_f16m4(src, vl); } @@ -1276,6 +1944,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4 (vuint32mf2_t src, size_t vl) { return vfncvt_f_xu_w_f16mf4(src, vl); } @@ -1285,6 +1958,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2 (vuint32m1_t src, size_t vl) { return vfncvt_f_xu_w_f16mf2(src, vl); } @@ -1294,6 +1972,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// 
CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfncvt_f_xu_w_f16m1 (vuint32m2_t src, size_t vl) { return vfncvt_f_xu_w_f16m1(src, vl); } @@ -1303,6 +1986,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfncvt_f_xu_w_f16m2 (vuint32m4_t src, size_t vl) { return vfncvt_f_xu_w_f16m2(src, vl); } @@ -1312,6 +2000,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfncvt_f_xu_w_f16m4 (vuint32m8_t src, size_t vl) { return vfncvt_f_xu_w_f16m4(src, vl); } @@ -1321,6 +2014,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfncvt_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) { return vfncvt_f_f_w_f16mf4(src, vl); } @@ -1330,6 +2028,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) { return vfncvt_rod_f_f_w_f16mf4(src, vl); } @@ -1339,6 +2042,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv2f16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfncvt_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) { return vfncvt_f_f_w_f16mf2(src, vl); } @@ -1348,6 +2056,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) { return 
vfncvt_rod_f_f_w_f16mf2(src, vl); } @@ -1357,6 +2070,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfncvt_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) { return vfncvt_f_f_w_f16m1(src, vl); } @@ -1366,6 +2084,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) { return vfncvt_rod_f_f_w_f16m1(src, vl); } @@ -1375,6 +2098,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfncvt_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) { return vfncvt_f_f_w_f16m2(src, vl); } @@ -1384,6 +2112,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) { return vfncvt_rod_f_f_w_f16m2(src, vl); } @@ -1393,6 +2126,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.nxv16f16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfncvt_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) { return vfncvt_f_f_w_f16m4(src, vl); } @@ -1402,6 +2140,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) { return vfncvt_rod_f_f_w_f16m4(src, vl); } @@ -1411,6 +2154,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8mf8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf8_t test_vfncvt_x_f_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfncvt_x_f_w_i8mf8_m(mask, maskedoff, src, vl); } @@ -1420,6 +2168,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_m (vbool64_t mask, vint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf8_m(mask, maskedoff, src, vl); } @@ -1429,6 +2182,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf4_t test_vfncvt_x_f_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfncvt_x_f_w_i8mf4_m(mask, maskedoff, src, vl); } @@ -1438,6 +2196,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_m (vbool32_t mask, vint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf4_m(mask, maskedoff, src, vl); } @@ -1447,6 +2210,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf2_t test_vfncvt_x_f_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfncvt_x_f_w_i8mf2_m(mask, maskedoff, src, vl); } @@ -1456,6 +2224,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_m (vbool16_t mask, vint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8mf2_m(mask, maskedoff, src, vl); } @@ -1465,6 +2238,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m1_t test_vfncvt_x_f_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { return vfncvt_x_f_w_i8m1_m(mask, maskedoff, src, vl); } @@ -1474,6 +2252,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_m (vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m1_m(mask, maskedoff, src, vl); } @@ -1483,6 +2266,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m2_t test_vfncvt_x_f_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { return vfncvt_x_f_w_i8m2_m(mask, maskedoff, src, vl); } @@ -1492,6 +2280,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_m (vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m2_m(mask, maskedoff, src, vl); } @@ -1501,6 +2294,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( 
[[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_x_f_w_i8m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m4_t test_vfncvt_x_f_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { return vfncvt_x_f_w_i8m4_m(mask, maskedoff, src, vl); } @@ -1510,6 +2308,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_x_f_w_i8m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_m (vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { return vfncvt_rtz_x_f_w_i8m4_m(mask, maskedoff, src, vl); } @@ -1519,6 +2322,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8mf8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfncvt_xu_f_w_u8mf8_m(mask, maskedoff, src, vl); } @@ -1528,6 +2336,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_m (vbool64_t mask, vuint8mf8_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf8_m(mask, maskedoff, src, vl); } @@ -1537,6 +2350,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfncvt_xu_f_w_u8mf4_m(mask, maskedoff, src, vl); } @@ -1546,6 +2364,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], 
[[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_m (vbool32_t mask, vuint8mf4_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf4_m(mask, maskedoff, src, vl); } @@ -1555,6 +2378,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfncvt_xu_f_w_u8mf2_m(mask, maskedoff, src, vl); } @@ -1564,6 +2392,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_m (vbool16_t mask, vuint8mf2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8mf2_m(mask, maskedoff, src, vl); } @@ -1573,6 +2406,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m1_t test_vfncvt_xu_f_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { return vfncvt_xu_f_w_u8m1_m(mask, maskedoff, src, vl); } @@ -1582,6 +2420,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_m (vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m1_m(mask, maskedoff, src, vl); } @@ -1591,6 +2434,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m2_t test_vfncvt_xu_f_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { return vfncvt_xu_f_w_u8m2_m(mask, maskedoff, src, vl); } @@ -1600,6 +2448,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_m (vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m2_m(mask, maskedoff, src, vl); } @@ -1609,6 +2462,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_xu_f_w_u8m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m4_t test_vfncvt_xu_f_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { return vfncvt_xu_f_w_u8m4_m(mask, maskedoff, src, vl); } @@ -1618,6 +2476,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_m (vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { return vfncvt_rtz_xu_f_w_u8m4_m(mask, maskedoff, src, vl); } @@ -1627,6 +2490,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint32mf2_t src, size_t vl) { return vfncvt_f_x_w_f16mf4_m(mask, maskedoff, src, vl); } @@ -1636,6 +2504,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint32m1_t src, size_t vl) { return vfncvt_f_x_w_f16mf2_m(mask, maskedoff, src, vl); } @@ -1645,6 +2518,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfncvt_f_x_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { return vfncvt_f_x_w_f16m1_m(mask, maskedoff, src, vl); } @@ -1654,6 +2532,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfncvt_f_x_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { return vfncvt_f_x_w_f16m2_m(mask, maskedoff, src, vl); } @@ -1663,6 +2546,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_x_w_f16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfncvt_f_x_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { return vfncvt_f_x_w_f16m4_m(mask, maskedoff, src, vl); } @@ -1672,6 +2560,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint32mf2_t src, size_t vl) { return vfncvt_f_xu_w_f16mf4_m(mask, maskedoff, src, vl); } @@ -1681,6 +2574,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint32m1_t src, size_t vl) { return vfncvt_f_xu_w_f16mf2_m(mask, maskedoff, src, vl); } @@ -1690,6 +2588,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { return vfncvt_f_xu_w_f16m1_m(mask, maskedoff, src, vl); } @@ -1699,6 +2602,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { return vfncvt_f_xu_w_f16m2_m(mask, maskedoff, src, vl); } @@ -1708,6 +2616,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_xu_w_f16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { return vfncvt_f_xu_w_f16m4_m(mask, maskedoff, src, vl); } @@ -1717,6 +2630,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfncvt_f_f_w_f16mf4_m(mask, maskedoff, src, vl); } @@ -1726,6 +2644,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfncvt_rod_f_f_w_f16mf4_m(mask, maskedoff, src, vl); } @@ -1735,6 +2658,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfncvt_f_f_w_f16mf2_m(mask, maskedoff, src, vl); } @@ -1744,6 +2672,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfncvt_rod_f_f_w_f16mf2_m(mask, maskedoff, src, vl); } @@ -1753,6 +2686,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfncvt_f_f_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { return vfncvt_f_f_w_f16m1_m(mask, maskedoff, src, vl); } @@ -1762,6 +2700,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { return vfncvt_rod_f_f_w_f16m1_m(mask, maskedoff, src, vl); } @@ -1771,6 +2714,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 
0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f16m2_m(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat16m2_t test_vfncvt_f_f_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfncvt_f_f_w_f16m2_m(mask, maskedoff, src, vl);
}
@@ -1780,6 +2728,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f16m2_m(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f16m2_m(mask, maskedoff, src, vl);
}
@@ -1789,6 +2742,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfncvt_f_f_w_f16m4_m(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat16m4_t test_vfncvt_f_f_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
  return vfncvt_f_f_w_f16m4_m(mask, maskedoff, src, vl);
}
@@ -1798,6 +2756,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfncvt_rod_f_f_w_f16m4_m(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
  return vfncvt_rod_f_f_w_f16m4_m(mask, maskedoff, src, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwcvt.c
@@ -3,6 +3,9 @@
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
// RUN: -target-feature +v -target-feature +zfh \
// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN: -target-feature +v -target-feature +zfh -fexperimental-strict-floating-point -frounding-math \
+// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64-FENV %s

#include <riscv_vector.h>

@@ -11,6 +14,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32mf2(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8:[0-9]+]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
  return vfwcvt_f_x_v_f32mf2(src, vl);
}
@@ -20,6 +28,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m1(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
  return vfwcvt_f_x_v_f32m1(src, vl);
}
@@ -29,6 +42,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m2(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
  return vfwcvt_f_x_v_f32m2(src, vl);
}
@@ -38,6 +56,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m4(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
  return vfwcvt_f_x_v_f32m4(src, vl);
}
@@ -47,6 +70,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m8(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
  return vfwcvt_f_x_v_f32m8(src, vl);
}
@@ -56,6 +84,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
  return vfwcvt_f_xu_v_f32mf2(src, vl);
}
@@ -65,6 +98,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m1(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
  return vfwcvt_f_xu_v_f32m1(src, vl);
}
@@ -74,6 +112,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m2(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
  return vfwcvt_f_xu_v_f32m2(src, vl);
}
@@ -83,6 +126,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m4(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
  return vfwcvt_f_xu_v_f32m4(src, vl);
}
@@ -92,6 +140,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m8(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
  return vfwcvt_f_xu_v_f32m8(src, vl);
}
@@ -101,6 +154,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m1(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
  return vfwcvt_x_f_v_i64m1(src, vl);
}
@@ -110,6 +168,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
  return vfwcvt_rtz_x_f_v_i64m1(src, vl);
}
@@ -119,6 +182,11 @@
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
+// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m2(
+// CHECK-RV64-FENV-NEXT: entry:
+// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]]
+// CHECK-RV64-FENV-NEXT: ret [[TMP0]]
+//
vint64m2_t
test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { return vfwcvt_x_f_v_i64m2(src, vl); } @@ -128,6 +196,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i64m2(src, vl); } @@ -137,6 +210,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { return vfwcvt_x_f_v_i64m4(src, vl); } @@ -146,6 +224,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i64m4(src, vl); } @@ -155,6 +238,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { return vfwcvt_x_f_v_i64m8(src, vl); } @@ -164,6 +252,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i64m8(src, vl); } @@ -173,6 +266,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { return vfwcvt_xu_f_v_u64m1(src, vl); } @@ -182,6 +280,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u64m1(src, vl); } @@ -191,6 +294,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { return vfwcvt_xu_f_v_u64m2(src, vl); } @@ -200,6 +308,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u64m2(src, vl); } @@ -209,6 +322,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { return vfwcvt_xu_f_v_u64m4(src, vl); } @@ -218,6 +336,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u64m4(src, vl); } @@ -227,6 +350,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { return vfwcvt_xu_f_v_u64m8(src, vl); } @@ -236,6 +364,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// 
CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u64m8(src, vl); } @@ -245,6 +378,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) { return vfwcvt_f_x_v_f64m1(src, vl); } @@ -254,6 +392,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) { return vfwcvt_f_x_v_f64m2(src, vl); } @@ -263,6 +406,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) { return vfwcvt_f_x_v_f64m4(src, vl); } @@ -272,6 +420,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) { return vfwcvt_f_x_v_f64m8(src, vl); } @@ -281,6 +434,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) { return vfwcvt_f_xu_v_f64m1(src, vl); } @@ -290,6 +448,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64( 
undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) { return vfwcvt_f_xu_v_f64m2(src, vl); } @@ -299,6 +462,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) { return vfwcvt_f_xu_v_f64m4(src, vl); } @@ -308,6 +476,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) { return vfwcvt_f_xu_v_f64m8(src, vl); } @@ -317,6 +490,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) { return vfwcvt_f_f_v_f64m1(src, vl); } @@ -326,6 +504,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) { return vfwcvt_f_f_v_f64m2(src, vl); } @@ -335,6 +518,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) { return vfwcvt_f_f_v_f64m4(src, vl); } @@ -344,6 +532,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { return 
vfwcvt_f_f_v_f64m8(src, vl); } @@ -353,6 +546,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) { @@ -364,6 +562,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfwcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) { return vfwcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl); @@ -374,6 +577,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { return vfwcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl); @@ -384,6 +592,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { return vfwcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl); @@ -394,6 +607,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { return vfwcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl); @@ -404,6 +622,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) { @@ -415,6 +638,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) { return vfwcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl); @@ -425,6 +653,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { return vfwcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl); @@ -435,6 +668,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { return vfwcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl); @@ -445,6 +683,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { return vfwcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl); @@ -455,6 +698,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfwcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfwcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl); @@ -465,6 +713,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i64m1_m(mask, maskedoff, src, vl); @@ -475,6 +728,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfwcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl); @@ -485,6 +743,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i64m2_m(mask, maskedoff, src, vl); @@ -495,6 +758,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { return vfwcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl); @@ -505,6 +773,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] 
// +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i64m4_m(mask, maskedoff, src, vl); @@ -515,6 +788,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { return vfwcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl); @@ -525,6 +803,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i64m8_m(mask, maskedoff, src, vl); @@ -535,6 +818,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfwcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfwcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl); @@ -545,6 +833,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { @@ -556,6 +849,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// 
CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfwcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl); @@ -566,6 +864,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { @@ -577,6 +880,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { return vfwcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl); @@ -587,6 +895,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { @@ -598,6 +911,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { return vfwcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl); @@ -608,6 +926,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u64m8_m(mask, maskedoff, src, vl); @@ -618,6 +941,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) { return vfwcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl); @@ -628,6 +956,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { return vfwcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl); @@ -638,6 +971,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { return vfwcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl); @@ -648,6 +986,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { return vfwcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl); @@ -658,6 +1001,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// 
CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) { return vfwcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl); @@ -668,6 +1016,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { return vfwcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl); @@ -678,6 +1031,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { return vfwcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl); @@ -688,6 +1046,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { return vfwcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl); @@ -698,6 +1061,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv1f64.nxv1f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m1_t test_vfwcvt_f_f_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t src, size_t vl) { return vfwcvt_f_f_v_f64m1_m(mask, maskedoff, src, vl); @@ -708,6 +1076,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// 
vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { return vfwcvt_f_f_v_f64m2_m(mask, maskedoff, src, vl); @@ -718,6 +1091,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { return vfwcvt_f_f_v_f64m4_m(mask, maskedoff, src, vl); @@ -728,6 +1106,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f64m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { return vfwcvt_f_f_v_f64m8_m(mask, maskedoff, src, vl); @@ -738,6 +1121,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4 (vint8mf8_t src, size_t vl) { return vfwcvt_f_x_v_f16mf4(src, vl); } @@ -747,6 +1135,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2 (vint8mf4_t src, size_t vl) { return vfwcvt_f_x_v_f16mf2(src, vl); } @@ -756,6 +1149,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfwcvt_f_x_v_f16m1 (vint8mf2_t src, size_t vl) { return vfwcvt_f_x_v_f16m1(src, vl); } @@ -765,6 +1163,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.strict.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfwcvt_f_x_v_f16m2 (vint8m1_t src, size_t vl) { return vfwcvt_f_x_v_f16m2(src, vl); } @@ -774,6 +1177,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfwcvt_f_x_v_f16m4 (vint8m2_t src, size_t vl) { return vfwcvt_f_x_v_f16m4(src, vl); } @@ -783,6 +1191,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m8_t test_vfwcvt_f_x_v_f16m8 (vint8m4_t src, size_t vl) { return vfwcvt_f_x_v_f16m8(src, vl); } @@ -792,6 +1205,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16mf4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4 (vuint8mf8_t src, size_t vl) { return vfwcvt_f_xu_v_f16mf4(src, vl); } @@ -801,6 +1219,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2 (vuint8mf4_t src, size_t vl) { return vfwcvt_f_xu_v_f16mf2(src, vl); } @@ -810,6 +1233,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfwcvt_f_xu_v_f16m1 (vuint8mf2_t src, size_t vl) { return vfwcvt_f_xu_v_f16m1(src, vl); } @@ -819,6 +1247,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t 
test_vfwcvt_f_xu_v_f16m2 (vuint8m1_t src, size_t vl) { return vfwcvt_f_xu_v_f16m2(src, vl); } @@ -828,6 +1261,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfwcvt_f_xu_v_f16m4 (vuint8m2_t src, size_t vl) { return vfwcvt_f_xu_v_f16m4(src, vl); } @@ -837,6 +1275,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m8_t test_vfwcvt_f_xu_v_f16m8 (vuint8m4_t src, size_t vl) { return vfwcvt_f_xu_v_f16m8(src, vl); } @@ -846,6 +1289,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfwcvt_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) { return vfwcvt_x_f_v_i32mf2(src, vl); } @@ -855,6 +1303,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32mf2(src, vl); } @@ -864,6 +1317,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfwcvt_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) { return vfwcvt_x_f_v_i32m1(src, vl); } @@ -873,6 +1331,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m1(src, vl); } @@ -882,6 +1345,11 @@ // CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfwcvt_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) { return vfwcvt_x_f_v_i32m2(src, vl); } @@ -891,6 +1359,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m2(src, vl); } @@ -900,6 +1373,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfwcvt_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) { return vfwcvt_x_f_v_i32m4(src, vl); } @@ -909,6 +1387,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m4(src, vl); } @@ -918,6 +1401,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfwcvt_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) { return vfwcvt_x_f_v_i32m8(src, vl); } @@ -927,6 +1415,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m8(src, vl); } @@ -936,6 +1429,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// 
CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) { return vfwcvt_xu_f_v_u32mf2(src, vl); } @@ -945,6 +1443,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32mf2(src, vl); } @@ -954,6 +1457,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfwcvt_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) { return vfwcvt_xu_f_v_u32m1(src, vl); } @@ -963,6 +1471,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m1(src, vl); } @@ -972,6 +1485,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfwcvt_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) { return vfwcvt_xu_f_v_u32m2(src, vl); } @@ -981,6 +1499,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m2(src, vl); } @@ -990,6 +1513,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u32m4( +// CHECK-RV64-FENV-NEXT: entry: +// 
CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfwcvt_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) { return vfwcvt_xu_f_v_u32m4(src, vl); } @@ -999,6 +1527,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m4(src, vl); } @@ -1008,6 +1541,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfwcvt_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) { return vfwcvt_xu_f_v_u32m8(src, vl); } @@ -1017,6 +1555,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m8(src, vl); } @@ -1026,6 +1569,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f32mf2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2 (vfloat16mf4_t src, size_t vl) { return vfwcvt_f_f_v_f32mf2(src, vl); } @@ -1035,6 +1583,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f32m1( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfwcvt_f_f_v_f32m1 (vfloat16mf2_t src, size_t vl) { return vfwcvt_f_f_v_f32m1(src, vl); } @@ -1044,6 +1597,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f32m2( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64( undef, [[SRC:%.*]], i64 
[[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfwcvt_f_f_v_f32m2 (vfloat16m1_t src, size_t vl) { return vfwcvt_f_f_v_f32m2(src, vl); } @@ -1053,6 +1611,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f32m4( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfwcvt_f_f_v_f32m4 (vfloat16m2_t src, size_t vl) { return vfwcvt_f_f_v_f32m4(src, vl); } @@ -1062,6 +1625,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f32m8( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64( undef, [[SRC:%.*]], i64 [[VL:%.*]]) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfwcvt_f_f_v_f32m8 (vfloat16m4_t src, size_t vl) { return vfwcvt_f_f_v_f32m8(src, vl); } @@ -1071,6 +1639,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) { return vfwcvt_f_x_v_f16mf4_m(mask, maskedoff, src, vl); } @@ -1080,6 +1653,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) { return vfwcvt_f_x_v_f16mf2_m(mask, maskedoff, src, vl); } @@ -1089,6 +1667,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfwcvt_f_x_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) { return vfwcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); } @@ -1098,6 +1681,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { return vfwcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); } @@ -1107,6 +1695,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { return vfwcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); } @@ -1116,6 +1709,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_x_v_f16m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { return vfwcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); } @@ -1125,6 +1723,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16mf4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_m (vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) { return vfwcvt_f_xu_v_f16mf4_m(mask, maskedoff, src, vl); } @@ -1134,6 +1737,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_m (vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) { return vfwcvt_f_xu_v_f16mf2_m(mask, maskedoff, src, vl); } @@ -1143,6 +1751,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // 
CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_m (vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) { return vfwcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); } @@ -1152,6 +1765,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m (vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { return vfwcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); } @@ -1161,6 +1779,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { return vfwcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); } @@ -1170,6 +1793,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_xu_v_f16m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { return vfwcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl); } @@ -1179,6 +1807,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfwcvt_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfwcvt_x_f_v_i32mf2_m(mask, maskedoff, src, vl); } @@ -1188,6 +1821,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32mf2_m(mask, maskedoff, src, vl); } @@ -1197,6 +1835,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfwcvt_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfwcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl); } @@ -1206,6 +1849,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m1_m(mask, maskedoff, src, vl); } @@ -1215,6 +1863,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfwcvt_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfwcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl); } @@ -1224,6 +1877,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m2_m(mask, maskedoff, src, vl); } @@ -1233,6 +1891,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// 
CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfwcvt_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { return vfwcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl); } @@ -1242,6 +1905,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m4_m(mask, maskedoff, src, vl); } @@ -1251,6 +1919,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_x_f_v_i32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfwcvt_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { return vfwcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl); } @@ -1260,6 +1933,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { return vfwcvt_rtz_x_f_v_i32m8_m(mask, maskedoff, src, vl); } @@ -1269,6 +1947,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfwcvt_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); } @@ -1278,6 +1961,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// 
CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv1i32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32mf2_m(mask, maskedoff, src, vl); } @@ -1287,6 +1975,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfwcvt_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfwcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl); } @@ -1296,6 +1989,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv2i32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m1_m(mask, maskedoff, src, vl); } @@ -1305,6 +2003,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfwcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl); } @@ -1314,6 +2017,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m2_m(mask, maskedoff, src, vl); } @@ -1323,6 +2031,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret 
[[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { return vfwcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl); } @@ -1332,6 +2045,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m4_m(mask, maskedoff, src, vl); } @@ -1341,6 +2059,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_xu_f_v_u32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { return vfwcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl); } @@ -1350,6 +2073,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.rtz.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { return vfwcvt_rtz_xu_f_v_u32m8_m(mask, maskedoff, src, vl); } @@ -1359,6 +2087,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f32mf2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) { return vfwcvt_f_f_v_f32mf2_m(mask, maskedoff, src, vl); } @@ -1368,6 +2101,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: 
ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f32m1_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv2f32.nxv2f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m1_t test_vfwcvt_f_f_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t src, size_t vl) { return vfwcvt_f_f_v_f32m1_m(mask, maskedoff, src, vl); } @@ -1377,6 +2115,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f32m2_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { return vfwcvt_f_f_v_f32m2_m(mask, maskedoff, src, vl); } @@ -1386,6 +2129,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f32m4_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { return vfwcvt_f_f_v_f32m4_m(mask, maskedoff, src, vl); } @@ -1395,6 +2143,11 @@ // CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) // CHECK-RV64-NEXT: ret [[TMP0]] // +// CHECK-RV64-FENV-LABEL: @test_vfwcvt_f_f_v_f32m8_m( +// CHECK-RV64-FENV-NEXT: entry: +// CHECK-RV64-FENV-NEXT: [[TMP0:%.*]] = call @llvm.riscv.strict.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF:%.*]], [[SRC:%.*]], [[MASK:%.*]], i64 [[VL:%.*]], i64 0) #[[ATTR8]] +// CHECK-RV64-FENV-NEXT: ret [[TMP0]] +// vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { return vfwcvt_f_f_v_f32m8_m(mask, maskedoff, src, vl); } diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -179,6 +179,7 @@ std::vector IntrinsicTypes; RISCVPredefinedMacroT RISCVPredefinedMacros = 0; unsigned NF = 1; + bool SupportsStrictFP; public: RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName, @@ -187,7 +188,8 @@ Policy NoMaskPolicy, bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types, const std::vector &IntrinsicTypes, - const std::vector &RequiredFeatures, unsigned NF); + const std::vector &RequiredFeatures, unsigned NF, + bool SupportsStrictFP); ~RVVIntrinsic() = default; StringRef getBuiltinName() const { return BuiltinName; } @@ -201,6 +203,7 @@ bool hasManualCodegen() const { return !ManualCodegen.empty(); } bool hasAutoDef() const { return HasAutoDef; } bool isMask() const { return 
IsMask; }
+  bool supportsStrictFP() const { return SupportsStrictFP; }
  StringRef getIRName() const { return IRName; }
  StringRef getManualCodegen() const { return ManualCodegen; }
  Policy getNoMaskPolicy() const { return NoMaskPolicy; }
@@ -776,16 +779,20 @@
//===----------------------------------------------------------------------===//
// RVVIntrinsic implementation
//===----------------------------------------------------------------------===//
-RVVIntrinsic::RVVIntrinsic(
-    StringRef NewName, StringRef Suffix, StringRef NewMangledName,
-    StringRef MangledSuffix, StringRef IRName, bool IsMask,
-    bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, Policy NoMaskPolicy,
-    bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen,
-    const RVVTypes &OutInTypes, const std::vector &NewIntrinsicTypes,
-    const std::vector &RequiredFeatures, unsigned NF)
+RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
+                           StringRef NewMangledName, StringRef MangledSuffix,
+                           StringRef IRName, bool IsMask,
+                           bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
+                           Policy NoMaskPolicy, bool HasNoMaskedOverloaded,
+                           bool HasAutoDef, StringRef ManualCodegen,
+                           const RVVTypes &OutInTypes,
+                           const std::vector &NewIntrinsicTypes,
+                           const std::vector &RequiredFeatures,
+                           unsigned NF, bool SupportsStrictFP)
    : IRName(IRName), IsMask(IsMask), HasVL(HasVL), HasPolicy(HasPolicy),
      NoMaskPolicy(NoMaskPolicy), HasNoMaskedOverloaded(HasNoMaskedOverloaded),
-      HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF) {
+      HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF),
+      SupportsStrictFP(SupportsStrictFP) {
  // Init BuiltinName, Name and MangledName
  BuiltinName = NewName.str();
@@ -849,8 +856,15 @@
}
void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
-  if (!getIRName().empty())
-    OS << " ID = Intrinsic::riscv_" + getIRName() + ";\n";
+  if (!getIRName().empty()) {
+    if (!supportsStrictFP()) {
+      OS << " ID = Intrinsic::riscv_" + getIRName() + ";\n";
+    } else {
+      OS << " ID = Builder.getIsFPConstrained() ? "
+         << " Intrinsic::riscv_strict_" + getIRName() + " : "
+         << " Intrinsic::riscv_" + getIRName() + ";\n";
+    }
+  }
  if (NF >= 2)
    OS << " NF = " + utostr(getNF()) + ";\n";
  if (hasManualCodegen()) {
@@ -1176,6 +1190,7 @@
  Policy NoMaskPolicy = static_cast(NoMaskPolicyRecord->getValueAsInt("Value"));
  bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
+  bool SupportsStrictFP = R->getValueAsBit("SupportsStrictFP");
  std::vector Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
  StringRef ManualCodegen = R->getValueAsString("ManualCodegen");
  StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask");
@@ -1250,7 +1265,8 @@
          Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
          /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy,
          NoMaskPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
-          Types.getValue(), IntrinsicTypes, RequiredFeatures, NF));
+          Types.getValue(), IntrinsicTypes, RequiredFeatures, NF,
+          SupportsStrictFP));
      if (HasMask) {
        // Create a mask intrinsic
        Optional MaskTypes =
@@ -1260,7 +1276,7 @@
            /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy,
            NoMaskPolicy, HasNoMaskedOverloaded, HasAutoDef,
            ManualCodegenMask, MaskTypes.getValue(), IntrinsicTypes,
-            RequiredFeatures, NF));
+            RequiredFeatures, NF, SupportsStrictFP));
      }
    } // end for Log2LMULList
  } // end for TypeRange
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1135,6 +1135,14 @@
    def "int_riscv_" #NAME :RISCVConversionNoMask;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
  }
+  multiclass RISCVStrictConversion {
+    let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
+      def "int_riscv_strict_" #NAME : RISCVConversionNoMask;
+    }
+    let IntrProperties = [ImmArg>, IntrInaccessibleMemOnly, IntrWillReturn] in {
+      def "int_riscv_strict_" # NAME # "_mask" : RISCVConversionMask;
+    }
+  }
  multiclass RISCVUSSegLoad {
    def "int_riscv_" # NAME : RISCVUSSegLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask;
@@ -1419,28 +1427,28 @@
  defm vmsof : RISCVMaskUnaryMOut;
  defm vmsif : RISCVMaskUnaryMOut;

-  defm vfcvt_xu_f_v : RISCVConversion;
-  defm vfcvt_x_f_v : RISCVConversion;
+  defm vfcvt_xu_f_v : RISCVConversion, RISCVStrictConversion;
+  defm vfcvt_x_f_v : RISCVConversion, RISCVStrictConversion;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
-  defm vfcvt_f_xu_v : RISCVConversion;
-  defm vfcvt_f_x_v : RISCVConversion;
+  defm vfcvt_f_xu_v : RISCVConversion, RISCVStrictConversion;
+  defm vfcvt_f_x_v : RISCVConversion, RISCVStrictConversion;

-  defm vfwcvt_f_xu_v : RISCVConversion;
-  defm vfwcvt_f_x_v : RISCVConversion;
-  defm vfwcvt_xu_f_v : RISCVConversion;
-  defm vfwcvt_x_f_v : RISCVConversion;
+  defm vfwcvt_f_xu_v : RISCVConversion, RISCVStrictConversion;
+  defm vfwcvt_f_x_v : RISCVConversion, RISCVStrictConversion;
+  defm vfwcvt_xu_f_v : RISCVConversion, RISCVStrictConversion;
+  defm vfwcvt_x_f_v : RISCVConversion, RISCVStrictConversion;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
-  defm vfwcvt_f_f_v : RISCVConversion;
+  defm vfwcvt_f_f_v : RISCVConversion, RISCVStrictConversion;

-  defm vfncvt_f_xu_w : RISCVConversion;
-  defm vfncvt_f_x_w : RISCVConversion;
-  defm vfncvt_xu_f_w : RISCVConversion;
-  defm vfncvt_x_f_w : RISCVConversion;
+  defm vfncvt_f_xu_w : RISCVConversion, RISCVStrictConversion;
+  defm vfncvt_f_x_w : RISCVConversion, RISCVStrictConversion;
+  defm vfncvt_xu_f_w : RISCVConversion, RISCVStrictConversion;
+  defm vfncvt_x_f_w : RISCVConversion, RISCVStrictConversion;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
-  defm vfncvt_f_f_w : RISCVConversion;
+  defm vfncvt_f_f_w : RISCVConversion, RISCVStrictConversion;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -308,6 +308,12 @@
  // result being sign extended to 64 bit. These saturate out of range inputs.
  STRICT_FCVT_W_RV64 = ISD::FIRST_TARGET_STRICTFP_OPCODE,
  STRICT_FCVT_WU_RV64,
+  STRICT_VFCVT_X_F,
+  STRICT_VFCVT_XU_F,
+  STRICT_VFCVT_F_X,
+  STRICT_VFCVT_F_XU,
+  STRICT_FP_ROUND_VL,
+  STRICT_FP_EXTEND_VL,

  // Memory opcodes start here.
  VLE_VL = ISD::FIRST_TARGET_MEMORY_OPCODE,
@@ -639,6 +645,9 @@
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

+  SDValue lowerRVVStrictIntrinsics(SDValue Op, SelectionDAG &DAG, unsigned Opc,
+                                   bool HasMask) const;
+
  SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4656,6 +4656,35 @@
  return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
}

+SDValue RISCVTargetLowering::lowerRVVStrictIntrinsics(SDValue Op,
+                                                      SelectionDAG &DAG,
+                                                      unsigned Opc,
+                                                      bool HasMask) const {
+  SDLoc DL(Op);
+  MVT XLenVT = Subtarget.getXLenVT();
+  MVT VT = Op.getSimpleValueType();
+  SDVTList VTs = DAG.getVTList({VT, MVT::Other});
+  SmallVector Ops;
+  Ops.push_back(Op.getOperand(0)); // Chain
+  unsigned NumOperands = Op.getNumOperands();
+  if (HasMask) {
+    for (size_t i = 2; i < NumOperands; i++) {
+      Ops.push_back(Op.getOperand(i));
+    }
+  } else {
+    for (size_t i = 2; i < NumOperands - 1; i++) {
+      Ops.push_back(Op.getOperand(i));
+    }
+    SDValue VL = Op.getOperand(NumOperands - 1);
+    MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
+    SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
+    Ops.push_back(Mask);
+    Ops.push_back(VL);
+    Ops.push_back(DAG.getUNDEF(XLenVT)); // Policy
+  }
+  return DAG.getNode(Opc, DL, VTs, Ops);
+}
+
SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = Op.getConstantOperandVal(0);
@@ -4923,6 +4952,27 @@
    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
    return DAG.getMergeValues({Result, Chain}, DL);
  }
+#define CASE_STRICT(Intrin, Opcode)                                            \
+  case Intrinsic::riscv_strict_##Intrin:                                       \
+    return lowerRVVStrictIntrinsics(Op, DAG, Opcode, /*HasMask*/ false);       \
+  case Intrinsic::riscv_strict_##Intrin##_mask:                                \
+    return lowerRVVStrictIntrinsics(Op, DAG, Opcode, /*HasMask*/ true);
+
+    CASE_STRICT(vfcvt_xu_f_v, RISCVISD::STRICT_VFCVT_XU_F)
+    CASE_STRICT(vfcvt_x_f_v, RISCVISD::STRICT_VFCVT_X_F)
+    CASE_STRICT(vfcvt_f_xu_v, RISCVISD::STRICT_VFCVT_F_XU)
+    CASE_STRICT(vfcvt_f_x_v, RISCVISD::STRICT_VFCVT_F_X)
+    CASE_STRICT(vfwcvt_xu_f_v, RISCVISD::STRICT_VFCVT_XU_F)
+    CASE_STRICT(vfwcvt_x_f_v, RISCVISD::STRICT_VFCVT_X_F)
+    CASE_STRICT(vfwcvt_f_xu_v, RISCVISD::STRICT_VFCVT_F_XU)
+    CASE_STRICT(vfwcvt_f_x_v, RISCVISD::STRICT_VFCVT_F_X)
+    CASE_STRICT(vfwcvt_f_f_v, RISCVISD::STRICT_FP_EXTEND_VL)
+    CASE_STRICT(vfncvt_xu_f_w, RISCVISD::STRICT_VFCVT_XU_F)
+    CASE_STRICT(vfncvt_x_f_w, RISCVISD::STRICT_VFCVT_X_F)
+    CASE_STRICT(vfncvt_f_xu_w, RISCVISD::STRICT_VFCVT_F_XU)
+    CASE_STRICT(vfncvt_f_x_w, RISCVISD::STRICT_VFCVT_F_X)
+    CASE_STRICT(vfncvt_f_f_w, RISCVISD::STRICT_FP_ROUND_VL)
+#undef CASE_STRICT
  }
  return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
@@ -10691,6 +10741,12 @@
  NODE_NAME_CASE(VCPOP_VL)
  NODE_NAME_CASE(VLE_VL)
  NODE_NAME_CASE(VSE_VL)
+  NODE_NAME_CASE(STRICT_VFCVT_X_F)
+  NODE_NAME_CASE(STRICT_VFCVT_XU_F)
+  NODE_NAME_CASE(STRICT_VFCVT_F_X)
+  NODE_NAME_CASE(STRICT_VFCVT_F_XU)
+  NODE_NAME_CASE(STRICT_FP_ROUND_VL)
+  NODE_NAME_CASE(STRICT_FP_EXTEND_VL)
  NODE_NAME_CASE(READ_CSR)
  NODE_NAME_CASE(WRITE_CSR)
  NODE_NAME_CASE(SWAP_CSR)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -4670,35 +4670,47 @@
//===----------------------------------------------------------------------===//
// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
-defm PseudoVFCVT_XU_F : VPseudoVCVTI_V;
-defm PseudoVFCVT_X_F : VPseudoVCVTI_V;
+let mayRaiseFPException = true in {
defm PseudoVFCVT_RTZ_XU_F : VPseudoVCVTI_V;
defm PseudoVFCVT_RTZ_X_F : VPseudoVCVTI_V;
+}
+let Uses = [FRM], mayRaiseFPException = true in {
+defm PseudoVFCVT_XU_F : VPseudoVCVTI_V;
+defm PseudoVFCVT_X_F : VPseudoVCVTI_V;
defm PseudoVFCVT_F_XU : VPseudoVCVTF_V;
defm PseudoVFCVT_F_X : VPseudoVCVTF_V;
+}

//===----------------------------------------------------------------------===//
// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
//===----------------------------------------------------------------------===//
-defm PseudoVFWCVT_XU_F : VPseudoVWCVTI_V;
-defm PseudoVFWCVT_X_F : VPseudoVWCVTI_V;
+let mayRaiseFPException = true in {
defm PseudoVFWCVT_RTZ_XU_F : VPseudoVWCVTI_V;
defm PseudoVFWCVT_RTZ_X_F : VPseudoVWCVTI_V;
+}
+let Uses = [FRM], mayRaiseFPException = true in {
+defm PseudoVFWCVT_XU_F : VPseudoVWCVTI_V;
+defm PseudoVFWCVT_X_F : VPseudoVWCVTI_V;
defm PseudoVFWCVT_F_XU : VPseudoVWCVTF_V;
defm PseudoVFWCVT_F_X : VPseudoVWCVTF_V;
defm PseudoVFWCVT_F_F : VPseudoVWCVTD_V;
+}

//===----------------------------------------------------------------------===//
// 14.19. 
Narrowing Floating-Point/Integer Type-Convert Instructions //===----------------------------------------------------------------------===// -defm PseudoVFNCVT_XU_F : VPseudoVNCVTI_W; -defm PseudoVFNCVT_X_F : VPseudoVNCVTI_W; +let mayRaiseFPException = true in { defm PseudoVFNCVT_RTZ_XU_F : VPseudoVNCVTI_W; defm PseudoVFNCVT_RTZ_X_F : VPseudoVNCVTI_W; +defm PseudoVFNCVT_ROD_F_F : VPseudoVNCVTD_W; +} +let Uses = [FRM], mayRaiseFPException = true in { +defm PseudoVFNCVT_XU_F : VPseudoVNCVTI_W; +defm PseudoVFNCVT_X_F : VPseudoVNCVTI_W; defm PseudoVFNCVT_F_XU : VPseudoVNCVTF_W; defm PseudoVFNCVT_F_X : VPseudoVNCVTF_W; defm PseudoVFNCVT_F_F : VPseudoVNCVTD_W; -defm PseudoVFNCVT_ROD_F_F : VPseudoVNCVTD_W; +} } // Predicates = [HasVInstructionsAnyF] let Predicates = [HasVInstructions] in { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -125,11 +125,30 @@ SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> ]>; +// Input: (passthru, vector, mask, vl, policy) +def SDT_RISCVFPRoundOp_VL_TA: SDTypeProfile<1, 5, [ + SDTCisFP<0>, SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisOpSmallerThanOp<0, 2>, + SDTCisFP<2>, SDTCisVec<2>, SDTCisSameNumEltsAs<0, 2>, + SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, + SDTCisVT<4, XLenVT>, SDTCisVT<5, XLenVT> +]>; +// Input: (passthru, vector, mask, vl, policy) +def SDT_RISCVFPExtendOp_VL_TA : SDTypeProfile<1, 5, [ + SDTCisFP<0>, SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisOpSmallerThanOp<2, 0>, + SDTCisFP<2>, SDTCisVec<2>, SDTCisSameNumEltsAs<0, 2>, + SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, + SDTCisVT<4, XLenVT>, SDTCisVT<5, XLenVT> +]>; def riscv_fpround_vl : SDNode<"RISCVISD::FP_ROUND_VL", SDT_RISCVFPRoundOp_VL>; def riscv_fpextend_vl : SDNode<"RISCVISD::FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>; def riscv_fncvt_rod_vl : SDNode<"RISCVISD::VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>; +def riscv_strict_fpextend_vl : SDNode<"RISCVISD::STRICT_FP_EXTEND_VL", + SDT_RISCVFPExtendOp_VL_TA, [SDNPHasChain]>; +def riscv_strict_fpround_vl : SDNode<"RISCVISD::STRICT_FP_ROUND_VL", + SDT_RISCVFPRoundOp_VL_TA, [SDNPHasChain]>; + def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [ SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> @@ -144,6 +163,31 @@ def riscv_sint_to_fp_vl : SDNode<"RISCVISD::SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; def riscv_uint_to_fp_vl : SDNode<"RISCVISD::UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>; +// Input: (passthru, vector, mask, vl, policy) +def SDT_RISCVFP2IOp_VL_TA: SDTypeProfile<1, 5, [ + SDTCisInt<0>, SDTCisVec<0>, SDTCisSameNumEltsAs<0, 1>, + SDTCisFP<2>, SDTCisVec<2>, SDTCisSameNumEltsAs<0, 2>, + SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, + SDTCisVT<4, XLenVT>, SDTCisVT<5, XLenVT> +]>; + +// Input: (passthru, vector, mask, vl, policy) +def SDT_RISCVI2FPOp_VL_TA: SDTypeProfile<1, 5, [ + SDTCisFP<0>, SDTCisVec<0>, SDTCisSameNumEltsAs<0, 1>, + SDTCisInt<2>, SDTCisVec<2>, SDTCisSameNumEltsAs<0, 2>, + SDTCVecEltisVT<3, i1>, SDTCisSameNumEltsAs<0, 3>, + SDTCisVT<4, XLenVT>, SDTCisVT<5, XLenVT> +]>; + +def riscv_strict_fp_to_sint_vl : SDNode<"RISCVISD::STRICT_VFCVT_X_F", + SDT_RISCVFP2IOp_VL_TA, [SDNPHasChain]>; +def riscv_strict_fp_to_uint_vl : SDNode<"RISCVISD::STRICT_VFCVT_XU_F", + SDT_RISCVFP2IOp_VL_TA, 
[SDNPHasChain]>; +def riscv_strict_sint_to_fp_vl : SDNode<"RISCVISD::STRICT_VFCVT_F_X", + SDT_RISCVI2FPOp_VL_TA, [SDNPHasChain]>; +def riscv_strict_uint_to_fp_vl : SDNode<"RISCVISD::STRICT_VFCVT_F_XU", + SDT_RISCVI2FPOp_VL_TA, [SDNPHasChain]>; + def riscv_setcc_vl : SDNode<"RISCVISD::SETCC_VL", SDTypeProfile<1, 5, [SDTCVecEltisVT<0, i1>, SDTCisVec<1>, @@ -563,6 +607,52 @@ } } +multiclass VPatConvertStrictSDNode_VL { + def : Pat<(result_type (vop (result_type undef), + (op2_type op2_reg_class:$rs2), + (mask_type true_mask), + VLOpFrag, (XLenVT undef))), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + (op2_type op2_reg_class:$rs2), + GPR:$vl, sew)>; + def : Pat<(result_type (vop (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type true_mask), + VLOpFrag, (XLenVT undef))), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_TU") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + GPR:$vl, sew)>; + def : Pat<(result_type (vop (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), + VLOpFrag, (XLenVT timm:$policy))), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>; +} + +multiclass VPatConvertFP2ISDNode_V_VL_STRICT { + foreach fvti = AllFloatVectors in { + defvar ivti = GetIntVTypeInfo.Vti; + + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatConvertI2FPSDNode_V_VL { foreach fvti = AllFloatVectors in { defvar ivti = GetIntVTypeInfo.Vti; @@ -574,6 +664,15 @@ } } +multiclass VPatConvertI2FPSDNode_V_VL_STRICT { + foreach fvti = AllFloatVectors in { + defvar ivti = GetIntVTypeInfo.Vti; + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatWConvertFP2ISDNode_V_VL { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; @@ -586,6 +685,18 @@ } } +multiclass VPatWConvertFP2ISDNode_V_VL_STRICT { + foreach fvtiToFWti = AllWidenableFloatVectors in + { + defvar fvti = fvtiToFWti.Vti; + defvar iwti = GetIntVTypeInfo.Vti; + + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatWConvertI2FPSDNode_V_VL { foreach vtiToWti = AllWidenableIntToFloatVectors in { defvar ivti = vtiToWti.Vti; @@ -598,6 +709,16 @@ } } +multiclass VPatWConvertI2FPSDNode_V_VL_STRICT { + foreach vtiToWti = AllWidenableIntToFloatVectors in { + defvar vti = vtiToWti.Vti; + defvar fwti = vtiToWti.Wti; + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatNConvertFP2ISDNode_V_VL { foreach vtiToWti = AllWidenableIntToFloatVectors in { defvar vti = vtiToWti.Vti; @@ -610,6 +731,16 @@ } } +multiclass VPatNConvertFP2ISDNode_V_VL_STRICT { + foreach vtiToWti = AllWidenableIntToFloatVectors in { + defvar vti = vtiToWti.Vti; + defvar fwti = vtiToWti.Wti; + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatNConvertI2FPSDNode_V_VL { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; @@ -622,6 +753,16 @@ } } +multiclass VPatNConvertI2FPSDNode_V_VL_STRICT { + foreach fvtiToFWti = AllWidenableFloatVectors in { + defvar fvti = fvtiToFWti.Vti; + defvar iwti = GetIntVTypeInfo.Vti; + defm : VPatConvertStrictSDNode_VL; + } +} + multiclass VPatReductionVL { foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { defvar vti_m1 = !cast(!if(is_float, "VF", "VI") # vti.SEW # "M1"); @@ -1419,6 +1560,11 @@ defm : VPatConvertI2FPSDNode_V_VL; defm : VPatConvertI2FPSDNode_V_VL; + defm : VPatConvertFP2ISDNode_V_VL_STRICT; + defm : VPatConvertFP2ISDNode_V_VL_STRICT; + defm : 
VPatConvertI2FPSDNode_V_VL_STRICT; + defm : VPatConvertI2FPSDNode_V_VL_STRICT; + // 14.18. Widening Floating-Point/Integer Type-Convert Instructions defm : VPatWConvertFP2ISDNode_V_VL; defm : VPatWConvertFP2ISDNode_V_VL; @@ -1432,8 +1578,15 @@ VLOpFrag)), (!cast("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX) fvti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>; + defm : VPatConvertStrictSDNode_VL; } + defm : VPatWConvertFP2ISDNode_V_VL_STRICT; + defm : VPatWConvertFP2ISDNode_V_VL_STRICT; + defm : VPatWConvertI2FPSDNode_V_VL_STRICT; + defm : VPatWConvertI2FPSDNode_V_VL_STRICT; // 14.19 Narrowing Floating-Point/Integer Type-Convert Instructions defm : VPatNConvertFP2ISDNode_V_VL; defm : VPatNConvertFP2ISDNode_V_VL; @@ -1447,6 +1600,9 @@ VLOpFrag)), (!cast("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX) fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>; + defm : VPatConvertStrictSDNode_VL; def : Pat<(fvti.Vector (riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1), (fwti.Mask true_mask), @@ -1454,6 +1610,10 @@ (!cast("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX) fwti.RegClass:$rs1, GPR:$vl, fvti.Log2SEW)>; } + defm : VPatNConvertFP2ISDNode_V_VL_STRICT; + defm : VPatNConvertFP2ISDNode_V_VL_STRICT; + defm : VPatNConvertI2FPSDNode_V_VL_STRICT; + defm : VPatNConvertI2FPSDNode_V_VL_STRICT; } } // Predicates = [HasVInstructionsAnyF] diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-x.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-x.ll @@ -0,0 +1,874 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv1f16.nxv1i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f16.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f16.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f16.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv2f16.nxv2i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfcvt.f.x.v.nxv2f16.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv2f16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f16.nxv2i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f16.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f16.nxv2i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv4f16.nxv4i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f16.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv4f16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f16.nxv4i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f16.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f16.nxv4i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv8f16.nxv8i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f16.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv8f16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f16.nxv8i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f16.nxv8i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv16f16.nxv16i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv16f16.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv16f16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv16f16.nxv16i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f16.nxv16i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv32f16.nxv32i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv32f16.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv32f16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv32f16.nxv32i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv32f16.nxv32i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv1f32.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f32.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv1f32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfcvt.f.x.v.nxv1f32.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f32.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv2f32.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f32.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv2f32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f32.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f32.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv4f32.nxv4i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f32.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv4f32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f32.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f32.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv8f32.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: 
vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f32.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv8f32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f32.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f32.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv16f32.nxv16i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv16f32.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv16f32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv16f32.nxv16i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv16f32.nxv16i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv1f64.nxv1i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f64.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv1f64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv1f64.nxv1i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, 
v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv1f64.nxv1i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv2f64.nxv2i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f64.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv2f64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv2f64.nxv2i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv2f64.nxv2i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv4f64.nxv4i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f64.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv4f64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv4f64.nxv4i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv4f64.nxv4i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.nxv8f64.nxv8i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.nxv8f64.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.x.v_nxv8f64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.x.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfcvt.f.x.v.nxv8f64.nxv8i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.x.v.mask.nxv8f64.nxv8i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-xu.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-f-xu.ll @@ -0,0 +1,874 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f16.nxv1i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f16.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv1f16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f16.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f16.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f16.nxv2i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f16.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv2f16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f16.nxv2i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfcvt_mask_f.xu.v_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f16.nxv2i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f16.nxv4i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f16.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv4f16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f16.nxv4i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f16.nxv8i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f16.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv8f16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f16.nxv8i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f16.nxv16i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f16.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv16f16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfcvt_tu_f.xu.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f16.nxv16i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv32f16.nxv32i16( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv32f16.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv32f16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv32f16.nxv32i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv32f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f32.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f32.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv1f32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f32.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f32.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.strict.vfcvt.f.xu.v.nxv2f32.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f32.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv2f32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f32.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f32.nxv4i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f32.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv4f32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f32.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f32.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f32.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv8f32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f32.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f32.nxv16i32( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f32.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv16f32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv16f32.nxv16i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f64.nxv1i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f64.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv1f64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv1f64.nxv1i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f64.nxv2i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: 
+ %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f64.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv2f64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv2f64.nxv2i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f64.nxv4i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f64.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv4f64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv4f64.nxv4i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f64.nxv8i64( + , + , + iXLen); + +define @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f64.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_f.xu.v_nxv8f64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_f.xu.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.nxv8f64.nxv8i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_f.xu.v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret 
+entry: + %a = call @llvm.riscv.strict.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-x-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-x-f.ll @@ -0,0 +1,874 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv1i16.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i16.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i16.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i16.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv2i16.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i16.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv2i16_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i16.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i16.nxv2f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv4i16.nxv4f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: 
intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i16.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv4i16_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i16.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i16.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv8i16.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i16.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv8i16_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i16.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i16.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv16i16.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i16.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv16i16_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i16.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfcvt_mask_x.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i16.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv32i16.nxv32f16( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv32i16.nxv32f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv32i16_nxv32f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv32i16.nxv32f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv32i16.nxv32f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv1i32.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i32.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv1i32_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i32.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i32.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv2i32.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i32.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv2i32_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfcvt_tu_x.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i32.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i32.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv4i32.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i32.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv4i32_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i32.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i32.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv8i32.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i32.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv8i32_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i32.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i32.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv16i32.nxv16f32( + , + , + iXLen); + +define 
@intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i32.nxv16f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv16i32_nxv16f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv16i32.nxv16f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv16i32.nxv16f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv1i64.nxv1f64( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i64.nxv1f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv1i64_nxv1f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv1i64.nxv1f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv1i64.nxv1f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv2i64.nxv2f64( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i64.nxv2f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv2i64_nxv2f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv2i64.nxv2f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( + , + , + , + iXLen, + iXLen); + +define 
@intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv2i64.nxv2f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv4i64.nxv4f64( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i64.nxv4f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv4i64_nxv4f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv4i64.nxv4f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv4i64.nxv4f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.nxv8i64.nxv8f64( + , + , + iXLen); + +define @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i64.nxv8f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_x.f.v_nxv8i64_nxv8f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_x.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.nxv8i64.nxv8f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_x.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.x.f.v.mask.nxv8i64.nxv8f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-xu-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfcvt-xu-f.ll @@ -0,0 +1,874 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 
-mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i16.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i16.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv1i16_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i16.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i16.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i16.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv2i16_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i16.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i16.nxv2f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i16.nxv4f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i16.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv4i16_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfcvt.xu.f.v.nxv4i16.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i16.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i16.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv8i16_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i16.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i16.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i16.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv16i16_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i16.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv32i16.nxv32f16( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv32i16.nxv32f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv32i16_nxv32f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv32i16.nxv32f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv32i16_nxv32f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i32.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i32.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv1i32_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i32.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i32.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i32.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i32.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv2i32_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv2i32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i32.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i32_nxv2f32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i32.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i32.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv4i32_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i32.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i32.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i32.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv8i32_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i32.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i32.nxv16f32( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i32.nxv16f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv16i32_nxv16f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv16i32_nxv16f32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv16i32.nxv16f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv16i32_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i64.nxv1f64( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i64.nxv1f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv1i64_nxv1f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv1i64.nxv1f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i64.nxv2f64( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i64.nxv2f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv2i64_nxv2f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv2i64.nxv2f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv2i64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i64.nxv4f64( + , + , + iXLen); + +define 
@intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i64.nxv4f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv4i64_nxv4f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv4i64.nxv4f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv4i64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i64.nxv8f64( + , + , + iXLen); + +define @intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_xu.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i64.nxv8f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfcvt_tu_xu.f.v_nxv8i64_nxv8f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_tu_xu.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.nxv8i64.nxv8f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv8i64_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfcvt.xu.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-f.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv1f16.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = 
call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f16.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv1f16_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv1f16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f16.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv2f16.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv2f16.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv2f16_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv2f16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv2f16.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv4f16.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f16.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv4f16_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv4f16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f16.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; 
CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv8f16.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f16.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv8f16_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv8f16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f16.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv16f16.nxv16f32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv16f16.nxv16f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv16f16_nxv16f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv16f16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv16f16.nxv16f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv1f32.nxv1f64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f32.nxv1f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv1f32_nxv1f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfncvt_tu_f.f.w_nxv1f32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv1f32.nxv1f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv2f32.nxv2f64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv2f32.nxv2f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv2f32_nxv2f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv2f32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv2f32.nxv2f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.nxv4f32.nxv4f64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f32.nxv4f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv4f32_nxv4f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv4f32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv4f32.nxv4f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.strict.vfncvt.f.f.w.nxv8f32.nxv8f64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f32.nxv8f64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.f.w_nxv8f32_nxv8f64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.f.w_nxv8f32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.nxv8f32.nxv8f64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-x.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-x.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv1f16.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv1f16.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv1f16.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv2f16.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f16.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv2f16_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f16.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv4f16.nxv4i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f16.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv4f16_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f16.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv8f16.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f16.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv8f16_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f16.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind 
{ +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv16f16.nxv16i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv16f16.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv16f16_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv16f16.nxv16i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv1f32.nxv1i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv1f32.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv1f32_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv1f32.nxv1i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv2f32.nxv2i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f32.nxv2i64( + undef, + 
%0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv2f32_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv2f32.nxv2i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv4f32.nxv4i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f32.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv4f32_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv4f32.nxv4i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.nxv8f32.nxv8i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f32.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.x.w_nxv8f32_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.x.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.x.w.nxv8f32.nxv8i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.x.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + 
%a = call @llvm.riscv.strict.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-xu.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-f-xu.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f16.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f16.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv1f16_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f16.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f16.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f16.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv2f16_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f16.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f16.nxv4i32( + , + , + iXLen); + +define 
@intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f16.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv4f16_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f16.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f16.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f16.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv8f16_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f16.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv16f16.nxv16i32( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv16f16.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv16f16_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv16f16.nxv16i32( + %0, + %1, + 
iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f32.nxv1i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f32.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv1f32_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv1f32.nxv1i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f32.nxv2i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f32.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv2f32_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv2f32.nxv2i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f32.nxv4i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f32.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv4f32_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv4f32.nxv4i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f32.nxv8i64( + , + , + iXLen); + +define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f32.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfncvt_tu_f.xu.w_nxv8f32_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_tu_f.xu.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.nxv8f32.nxv8i64( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfncvt.f.xu.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-x-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-x-f.ll @@ -0,0 +1,889 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfncvt.x.f.w.nxv1i8.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call 
+declare <vscale x 1 x i8> @llvm.riscv.strict.vfncvt.x.f.w.nxv1i8.nxv1f16(
+  <vscale x 1 x i8>,
+  <vscale x 1 x half>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.x.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.strict.vfncvt.x.f.w.nxv1i8.nxv1f16(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x half> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_tu_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_tu_x.f.w_nxv1i8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vfncvt.x.f.w v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.strict.vfncvt.x.f.w.nxv1i8.nxv1f16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x half> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i8.nxv1f16(
+  <vscale x 1 x i8>,
+  <vscale x 1 x half>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.x.f.w v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.strict.vfncvt.x.f.w.mask.nxv1i8.nxv1f16(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}

[... the same three tests repeat for nxv2i8-nxv32i8 (nxv2f16-nxv32f16 sources), nxv1i16-nxv16i16 (nxv1f32-nxv16f32 sources) and nxv1i32-nxv8i32 (nxv1f64-nxv8f64 sources), completing strict-vfncvt-x-f.ll ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-xu-f.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfncvt-xu-f.ll
@@ -0,0 +1,889 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i8.nxv1f16(
+  <vscale x 1 x i8>,
+  <vscale x 1 x half>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.strict.vfncvt.xu.f.w.nxv1i8.nxv1f16(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x half> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
[... the tail-undisturbed (tu) and masked variants for nxv1i8, and the same three tests for nxv2i8-nxv32i8 (f16 sources), nxv1i16-nxv16i16 (f32 sources) and nxv1i32-nxv8i32 (f64 sources), complete strict-vfncvt-xu-f.ll ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-f.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-f.ll
@@ -0,0 +1,535 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+declare <vscale x 1 x float> @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f32.nxv1f16(
+  <vscale x 1 x float>,
+  <vscale x 1 x half>,
+  iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.strict.vfwcvt.f.f.v.nxv1f32.nxv1f16(
+    <vscale x 1 x float> undef,
+    <vscale x 1 x half> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x float> %a
+}

[... the tu and masked variants for nxv1f32, and the same tests for nxv2f32-nxv16f32 (f16 sources) and nxv1f64-nxv8f64 (f32 sources), complete strict-vfwcvt-f-f.ll ...]

diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-x.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-x.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-x.ll
@@ -0,0 +1,889 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f16.nxv1i8(
+  <vscale x 1 x half>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f16.nxv1i8(
+    <vscale x 1 x half> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x half> %a
+}

[... the tu and masked variants for nxv1f16, and the same tests for nxv2f16-nxv32f16 (i8 sources), continue in the same pattern before the f32-from-i16 cases begin ...]

+declare <vscale x 1 x float> @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f32.nxv1i16(
+  <vscale x 1 x float>,
+  <vscale x 1 x i16>,
+  iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vfwcvt.f.x.v v9, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f32.nxv1i16(
+    <vscale x 1 x float> undef,
+    <vscale x 1 x i16> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x float> %a
+}
+ +define @intrinsic_vfwcvt_tu_f.x.v_nxv1f32_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f32.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f32.nxv2i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f32.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv2f32_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f32.nxv2i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f32.nxv4i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f32.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv4f32_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f32.nxv4i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f32.nxv8i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f32.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv8f32_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f32.nxv8i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f32.nxv16i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f32.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv16f32_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv16f32.nxv16i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f64.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f64.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv1f64_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv1f64.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f64.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f64.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv2f64_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv2f64.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f64.nxv4i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f64.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv4f64_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv4f64.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f64.nxv8i32( + , + , + iXLen); + +define 
@intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f64.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.x.v_nxv8f64_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.x.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.nxv8f64.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-xu.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-xu.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-f-xu.ll @@ -0,0 +1,889 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f16.nxv1i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f16.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv1f16_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f16.nxv1i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f16.nxv2i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 
+; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f16.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv2f16_nxv2i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f16.nxv2i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f16.nxv4i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f16.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv4f16_nxv4i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f16.nxv4i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f16.nxv8i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f16.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv8f16_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f16.nxv8i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f16_nxv8i8: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f16.nxv16i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f16.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv16f16_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f16.nxv16i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv32f16.nxv32i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv32f16.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv32f16_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv32f16.nxv32i8( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define 
@intrinsic_vfwcvt_tu_f.xu.v_nxv1f32_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f32.nxv1i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv2f32_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f32.nxv2i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv4f32_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f32.nxv4i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = 
call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv8f32_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f32.nxv8i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv16f32_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv16f32.nxv16i16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f64.nxv1i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f64.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv1f64_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv1f64_nxv1i32: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv1f64.nxv1i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f64.nxv2i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f64.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv2f64_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv2f64.nxv2i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f64.nxv4i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f64.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv4f64_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv4f64.nxv4i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f64.nxv8i32( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f64.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_f.xu.v_nxv8f64_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_f.xu.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.nxv8f64.nxv8i32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-x-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-x-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-x-f.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i32.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i32.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i32.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i32.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i32.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv2i32_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i32.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i32.nxv4f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i32.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv4i32_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i32.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i32.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i32.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv8i32_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i32.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen 
%3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv16i32.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv16i32.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv16i32_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv16i32.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i64.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i64.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv1i64_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv1i64.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i64.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i64.nxv2f32( 
+ undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv2i64_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv2i64.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i64.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i64.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv4i64_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv4i64.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i64.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i64.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_x.f.v_nxv8i64_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_x.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.nxv8i64.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_x.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.x.f.v v8, v16, v0.t +; CHECK-NEXT: 
ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-xu-f.ll b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-xu-f.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/strict-vfwcvt-xu-f.ll @@ -0,0 +1,535 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i32.nxv1f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i32.nxv1f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv1i32_nxv1f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i32.nxv1f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i32.nxv2f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i32.nxv2f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv2i32_nxv2f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i32.nxv2f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i32_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i32.nxv4f16( + , + , + 
iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i32.nxv4f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv4i32_nxv4f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i32.nxv4f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i32_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i32.nxv8f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i32.nxv8f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv8i32_nxv8f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i32.nxv8f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i32_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv16i32.nxv16f16( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv16i32.nxv16f16( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv16i32_nxv16f16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.strict.vfwcvt.xu.f.v.nxv16i32.nxv16f16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv16i32_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv1i64_nxv1f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv1i64.nxv1f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i64_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i64.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v10, v8 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i64.nxv2f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv2i64_nxv2f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv2i64.nxv2f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv2i64_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i64.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32( %0, iXLen %1) nounwind { +; 
CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v12, v8 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i64.nxv4f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv4i64_nxv4f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv4i64.nxv4f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv4i64_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i64.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v16, v8 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i64.nxv8f32( + undef, + %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfwcvt_tu_xu.f.v_nxv8i64_nxv8f32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_tu_xu.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.nxv8i64.nxv8f32( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv8i64_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfwcvt.xu.f.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.strict.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +}