diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -200,6 +200,16 @@
 // Basic classes with automatic codegen.
 //===----------------------------------------------------------------------===//
+class RVVOutBuiltin<string suffix, string prototype, string type_range>
+    : RVVBuiltin<suffix, prototype, type_range> {
+  let IntrinsicTypes = [-1];
+}
+
+class RVVOp0Builtin<string suffix, string prototype, string type_range>
+    : RVVBuiltin<suffix, prototype, type_range> {
+  let IntrinsicTypes = [0];
+}
+
 class RVVOutOp1Builtin<string suffix, string prototype, string type_range>
     : RVVBuiltin<suffix, prototype, type_range> {
   let IntrinsicTypes = [-1, 1];
 }
@@ -361,6 +371,14 @@
     : RVVOp0Op1BuiltinSet;
+class RVVFloatingUnaryBuiltin<string builtin_suffix, string ir_suffix,
+                              string prototype>
+    : RVVOutBuiltin<ir_suffix, prototype, "fd"> {
+  let Name = NAME # "_" # builtin_suffix;
+}
+
+class RVVFloatingUnaryVVBuiltin : RVVFloatingUnaryBuiltin<"v", "v", "vv">;
+
 // For widen operation which has different mangling name.
 multiclass RVVWidenBuiltinSet<string intrinsic_name, string type_range,
                               list<list<string>> suffixes_prototypes> {
@@ -844,13 +862,13 @@
 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;

 // 14.8. Vector Floating-Point Square-Root Instruction
-// TODO
+def vfsqrt : RVVFloatingUnaryVVBuiltin;

 // 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
-// TODO
+def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;

 // 14.10. Vector Floating-Point Reciprocal Estimate Instruction
-// TODO
+def vfrec7 : RVVFloatingUnaryVVBuiltin;

 // 14.11. Vector Floating-Point MIN/MAX Instructions
 defm vfmin : RVVFloatingBinBuiltinSet;
@@ -870,10 +888,12 @@
 defm vmfge : RVVFloatingMaskOutVFBuiltinSet;

 // 14.14. Vector Floating-Point Classify Instruction
-// TODO
+let Name = "vfclass_v" in
+  def vfclass : RVVOp0Builtin<"Uv", "Uvv", "fd">;

 // 14.15. Vector Floating-Point Merge Instruction
-// TODO
+let Name = "vfmerge_vfm", HasMask = false, PermuteOperands = [2, 0, 1] in
+  def vfmerge : RVVOutOp1Builtin<"v", "vvem", "fd">;

 // 14.16.
Vector Floating-Point Move Instruction // TODO diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfclass.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfclass_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfclass_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) { + return vfclass(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vfloat32mf2_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vfloat32m1_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vfloat32m2_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vfloat32m4_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vfloat32m8_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vfloat64m1_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vfloat64m2_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vfloat64m4_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vfloat64m8_t op1, size_t vl) { + return vfclass(mask, maskedoff, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmerge.c @@ -0,0 +1,146 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, + float op2, size_t vl) { + return vfmerge(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, + size_t vl) { + return vfmerge(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, + size_t vl) { + return vfmerge(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, + size_t vl) { + return vfmerge(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, + size_t vl) { + return vfmerge(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, + double op2, size_t vl) { + return vfmerge(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, + double op2, size_t vl) { + return vfmerge(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, + double op2, size_t vl) { + return vfmerge(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2, + size_t vl) { + return vfmerge(mask, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrec7.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) 
[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i32( [[OP1:%.*]], i32 
[[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) { + return vfrec7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, + vfloat32mf2_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, + vfloat32m1_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, + vfloat32m2_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, + vfloat32m4_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, + vfloat32m8_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, size_t vl) { + return vfrec7(mask, maskedoff, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfrsqrt7.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// 
RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) { + return vfrsqrt7(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, + vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, + vfloat32m1_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret 
[[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, + vfloat32m2_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, + vfloat32m4_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, + vfloat32m8_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m( +// CHECK-RV64-NEXT: entry: 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, size_t vl) { + return vfrsqrt7(mask, maskedoff, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfsqrt.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6:#.*]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsqrt.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { + return vfsqrt(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, + vfloat32mf2_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, + vfloat32m1_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, + vfloat32m2_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, + vfloat32m4_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, + vfloat32m8_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, size_t vl) { + return vfsqrt(mask, maskedoff, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfclass.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: 
@test_vfclass_v_u32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) { + return vfclass_v_u32mf2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) { + return vfclass_v_u32m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) { + return vfclass_v_u32m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) { + return vfclass_v_u32m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) { + return vfclass_v_u32m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) { + return vfclass_v_u64m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2f64.i64( 
[[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) { + return vfclass_v_u64m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) { + return vfclass_v_u64m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) { + return vfclass_v_u64m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, + vfloat32mf2_t op1, size_t vl) { + return vfclass_v_u32mf2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, + vfloat32m1_t op1, size_t vl) { + return vfclass_v_u32m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, + vfloat32m2_t op1, size_t vl) { + return vfclass_v_u32m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfclass_v_u32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, + vfloat32m4_t op1, size_t vl) { + return vfclass_v_u32m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, + vfloat32m8_t op1, size_t vl) { + return vfclass_v_u32m8_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, + vfloat64m1_t op1, size_t vl) { + return vfclass_v_u64m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, + vfloat64m2_t op1, size_t vl) { + return vfclass_v_u64m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, + vfloat64m4_t op1, size_t vl) { + return vfclass_v_u64m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfclass_v_u64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], 
i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, + vfloat64m8_t op1, size_t vl) { + return vfclass_v_u64m8_m(mask, maskedoff, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmerge.c @@ -0,0 +1,146 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, + float op2, size_t vl) { + return vfmerge_vfm_f32mf2(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, + size_t vl) { + return vfmerge_vfm_f32m1(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, + size_t vl) { + return vfmerge_vfm_f32m2(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, + size_t vl) { + return vfmerge_vfm_f32m4(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i32( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16f32.f32.i64( [[OP1:%.*]], float [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, + size_t vl) { + return vfmerge_vfm_f32m8(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, + double op2, size_t vl) { + return vfmerge_vfm_f64m1(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, + double op2, size_t vl) { + return vfmerge_vfm_f64m2(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, + double op2, size_t vl) { + return vfmerge_vfm_f64m4(mask, op1, op2, vl); +} + +// CHECK-RV32-LABEL: @test_vfmerge_vfm_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i32( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8f64.f64.i64( [[OP1:%.*]], double [[OP2:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2, + 
size_t vl) { + return vfmerge_vfm_f64m8(mask, op1, op2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrec7.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { + return vfrec7_v_f32mf2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) { + return vfrec7_v_f32m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) { + return vfrec7_v_f32m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) { + return vfrec7_v_f32m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrec7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) { + return vfrec7_v_f32m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) { + return vfrec7_v_f64m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) { + return vfrec7_v_f64m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) { + return vfrec7_v_f64m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) { + return vfrec7_v_f64m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrec7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, + vfloat32mf2_t op1, size_t vl) { + return vfrec7_v_f32mf2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrec7_v_f32m1_m(vbool32_t mask, 
vfloat32m1_t maskedoff, + vfloat32m1_t op1, size_t vl) { + return vfrec7_v_f32m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrec7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, + vfloat32m2_t op1, size_t vl) { + return vfrec7_v_f32m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrec7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, + vfloat32m4_t op1, size_t vl) { + return vfrec7_v_f32m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrec7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, + vfloat32m8_t op1, size_t vl) { + return vfrec7_v_f32m8_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrec7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, size_t vl) { + return vfrec7_v_f64m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrec7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, size_t vl) { + return vfrec7_v_f64m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrec7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, size_t vl) { + return vfrec7_v_f64m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrec7_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, size_t vl) { + return vfrec7_v_f64m8_m(mask, maskedoff, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfrsqrt7.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_v_f32mf2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) { + return vfrsqrt7_v_f32m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i32( [[OP1:%.*]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) { + return vfrsqrt7_v_f32m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) { + return vfrsqrt7_v_f32m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) { + return vfrsqrt7_v_f32m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) { + return vfrsqrt7_v_f64m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) { + return vfrsqrt7_v_f64m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) { + return vfrsqrt7_v_f64m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t 
test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) { + return vfrsqrt7_v_f64m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, + vfloat32mf2_t op1, size_t vl) { + return vfrsqrt7_v_f32mf2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfrsqrt7_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, + vfloat32m1_t op1, size_t vl) { + return vfrsqrt7_v_f32m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfrsqrt7_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, + vfloat32m2_t op1, size_t vl) { + return vfrsqrt7_v_f32m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfrsqrt7_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, + vfloat32m4_t op1, size_t vl) { + return vfrsqrt7_v_f32m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfrsqrt7_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, + vfloat32m8_t op1, size_t vl) { + return vfrsqrt7_v_f32m8_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: 
@test_vfrsqrt7_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfrsqrt7_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, size_t vl) { + return vfrsqrt7_v_f64m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfrsqrt7_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, size_t vl) { + return vfrsqrt7_v_f64m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfrsqrt7_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, size_t vl) { + return vfrsqrt7_v_f64m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfrsqrt7_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, size_t vl) { + return vfrsqrt7_v_f64m8_m(mask, maskedoff, op1, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfsqrt.c @@ -0,0 +1,272 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f 
-target-feature +d -target-feature +experimental-v \ +// RUN: -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s + +// ASM-NOT: warning +#include + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) { + return vfsqrt_v_f32mf2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) { + return vfsqrt_v_f32m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) { + return vfsqrt_v_f32m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) { + return vfsqrt_v_f32m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv16f32.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) { + return vfsqrt_v_f32m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv1f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) { + return vfsqrt_v_f64m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// 
CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv2f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) { + return vfsqrt_v_f64m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv4f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) { + return vfsqrt_v_f64m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i32( [[OP1:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.nxv8f64.i64( [[OP1:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { + return vfsqrt_v_f64m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfsqrt_v_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, + vfloat32mf2_t op1, size_t vl) { + return vfsqrt_v_f32mf2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, + vfloat32m1_t op1, size_t vl) { + return vfsqrt_v_f32m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, + vfloat32m2_t op1, size_t vl) { + return vfsqrt_v_f32m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsqrt.mask.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, + vfloat32m4_t op1, size_t vl) { + return vfsqrt_v_f32m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, + vfloat32m8_t op1, size_t vl) { + return vfsqrt_v_f32m8_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, + vfloat64m1_t op1, size_t vl) { + return vfsqrt_v_f64m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, + vfloat64m2_t op1, size_t vl) { + return vfsqrt_v_f64m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, + vfloat64m4_t op1, size_t vl) { + return vfsqrt_v_f64m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfsqrt_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8_m( +// 
CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, + vfloat64m8_t op1, size_t vl) { + return vfsqrt_v_f64m8_m(mask, maskedoff, op1, vl); +}
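
As a quick illustration of how the intrinsics enabled by this patch compose, here is a minimal usage sketch (illustrative only, not part of the patch). It mirrors the calling conventions shown in the tests above; the header name `<riscv_vector.h>`, the helper names `safe_sqrt`/`masked_rsqrt`, and the use of `vmflt` (defined earlier in this series, section 14.13) are assumptions, not something this diff adds.

```c
#include <riscv_vector.h>
#include <stddef.h>

// Replace negative elements of `v` with 1.0f, then take the element-wise
// square root. `vl` is assumed to come from a prior vsetvli, as in the tests.
vfloat32m1_t safe_sqrt(vfloat32m1_t v, size_t vl) {
  // vmflt.vf: mask of lanes where v < 0.0f (pre-existing compare intrinsic).
  vbool32_t neg = vmflt_vf_f32m1_b32(v, 0.0f, vl);
  // vfmerge.vfm (added here): where the mask is set, take the scalar 1.0f,
  // otherwise keep the corresponding element of v.
  vfloat32m1_t fixed = vfmerge_vfm_f32m1(neg, v, 1.0f, vl);
  // vfsqrt.v (added here): unmasked element-wise square root.
  return vfsqrt_v_f32m1(fixed, vl);
}

// Classify the input and compute the 7-bit reciprocal-square-root estimate
// only on the lanes selected by `mask`, keeping `maskedoff` elsewhere;
// vfrec7 is used the same way for the reciprocal estimate.
vfloat32m1_t masked_rsqrt(vbool32_t mask, vfloat32m1_t maskedoff,
                          vfloat32m1_t v, size_t vl) {
  vuint32m1_t cls = vfclass_v_u32m1(v, vl); // per-element class bits (vfclass.v)
  (void)cls;                                // e.g. inspect bits for NaN/inf lanes
  return vfrsqrt7_v_f32m1_m(mask, maskedoff, v, vl); // ~1/sqrt(v), 7-bit estimate
}
```

The masked forms follow the same `(mask, maskedoff, op1, vl)` argument order as every `_m` test above, and the overloaded spellings (`vfsqrt(...)`, `vfclass(...)`) from the rvv-intrinsics-overloaded tests work interchangeably with the suffixed ones used here.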