diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -861,6 +861,33 @@
 }
 }
 
+multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
+  let Name = NAME,
+      IRName = IR,
+      IRNameMask = IR # "_mask",
+      ManualCodegen = [{
+      {
+        // op1, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[0]->getType(), Ops[1]->getType()};
+        Ops.insert(Ops.begin() + 1, Ops[0]);
+        break;
+      }
+      }],
+      ManualCodegenMask = [{
+      {
+        // maskedoff, op1, mask, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[1]->getType(),
+                          Ops[3]->getType()};
+        Ops.insert(Ops.begin() + 2, Ops[1]);
+        break;
+      }
+      }] in {
+    def : RVVBuiltin<"v", "vv", type_range>;
+  }
+}
+
 // 6. Configuration-Setting Instructions
 // 6.1. vsetvli/vsetvl instructions
 let HasVL = false,
@@ -1216,6 +1243,8 @@
 defm vfsgnj : RVVFloatingBinBuiltinSet;
 defm vfsgnjn : RVVFloatingBinBuiltinSet;
 defm vfsgnjx : RVVFloatingBinBuiltinSet;
+defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "fd">;
+defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "fd">;
 
 // 14.13. Vector Floating-Point Compare Instructions
 defm vmfeq : RVVFloatingMaskOutBuiltinSet;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
@@ -0,0 +1,261 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfabs_v_f32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i32(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) {
+  return vfabs(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfabs_v_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfabs_v_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) {
+  return vfabs(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfabs_v_f32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfabs_v_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:
ret [[TMP0]] +// +vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { + return vfabs(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) { + return vfabs(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { + return vfabs(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) { + return vfabs(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) { + return vfabs(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) { + return vfabs(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) { + return vfabs(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfabs_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfabs_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfabs_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfabs_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// 
CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfabs_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfabs_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfabs_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return vfabs(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c @@ -0,0 +1,261 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck 
--check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV32-LABEL: @test_vfneg_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP1]], i32 
[[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) { + return vfneg(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6:[0-9]+]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t 
maskedoff, vfloat32m2_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t 
mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) #[[ATTR6]] +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return vfneg(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c @@ -0,0 +1,261 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV32-LABEL: @test_vfabs_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { + return vfabs_v_f32mf2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfabs_v_f32m1 (vfloat32m1_t op1, size_t vl) { + return vfabs_v_f32m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfabs_v_f32m2 (vfloat32m2_t op1, size_t vl) { + return vfabs_v_f32m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: 
@test_vfabs_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfabs_v_f32m4 (vfloat32m4_t op1, size_t vl) { + return vfabs_v_f32m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfabs_v_f32m8 (vfloat32m8_t op1, size_t vl) { + return vfabs_v_f32m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfabs_v_f64m1 (vfloat64m1_t op1, size_t vl) { + return vfabs_v_f64m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfabs_v_f64m2 (vfloat64m2_t op1, size_t vl) { + return vfabs_v_f64m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfabs_v_f64m4 (vfloat64m4_t op1, size_t vl) { + return vfabs_v_f64m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfabs_v_f64m8 (vfloat64m8_t op1, size_t vl) { + return vfabs_v_f64m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64( 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfabs_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { + return vfabs_v_f32mf2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfabs_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return vfabs_v_f32m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfabs_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return vfabs_v_f32m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfabs_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return vfabs_v_f32m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfabs_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return vfabs_v_f32m8_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfabs_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return vfabs_v_f64m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfabs_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return vfabs_v_f64m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfabs_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return vfabs_v_f64m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfabs_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfabs_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return vfabs_v_f64m8_m(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c @@ -0,0 +1,261 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s +// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \ +// RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV32-LABEL: @test_vfneg_v_f32mf2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2 (vfloat32mf2_t op1, size_t vl) { + return vfneg_v_f32mf2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f32.nxv2f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfneg_v_f32m1 (vfloat32m1_t op1, size_t vl) { + return vfneg_v_f32m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f32.nxv4f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfneg_v_f32m2 (vfloat32m2_t op1, size_t vl) { + return vfneg_v_f32m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m4( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f32.nxv8f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfneg_v_f32m4 (vfloat32m4_t op1, size_t vl) { + return vfneg_v_f32m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16f32.nxv16f32.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfneg_v_f32m8 (vfloat32m8_t op1, size_t vl) { + return vfneg_v_f32m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m1( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m1( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1f64.nxv1f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfneg_v_f64m1 (vfloat64m1_t op1, size_t vl) { + return vfneg_v_f64m1(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2f64.nxv2f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfneg_v_f64m2 (vfloat64m2_t op1, size_t vl) { + return vfneg_v_f64m2(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m4( 
+// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m4( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4f64.nxv4f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfneg_v_f64m4 (vfloat64m4_t op1, size_t vl) { + return vfneg_v_f64m4(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i32( [[OP1:%.*]], [[OP1]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8f64.nxv8f64.i64( [[OP1:%.*]], [[OP1]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m8_t test_vfneg_v_f64m8 (vfloat64m8_t op1, size_t vl) { + return vfneg_v_f64m8(op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32mf2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfneg_v_f32mf2_m (vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, size_t vl) { + return vfneg_v_f32mf2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfneg_v_f32m1_m (vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return vfneg_v_f32m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfneg_v_f32m2_m (vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return vfneg_v_f32m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfneg_v_f32m4_m (vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return vfneg_v_f32m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f32m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f32m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfneg_v_f32m8_m (vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return vfneg_v_f32m8_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m1_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m1_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m1_t test_vfneg_v_f64m1_m (vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return vfneg_v_f64m1_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m2_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m2_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m2_t test_vfneg_v_f64m2_m (vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return vfneg_v_f64m2_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m4_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m4_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i64 [[VL:%.*]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat64m4_t test_vfneg_v_f64m4_m (vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return vfneg_v_f64m4_m(mask, maskedoff, op1, vl); +} + +// CHECK-RV32-LABEL: @test_vfneg_v_f64m8_m( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i32( [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP1]], [[MASK:%.*]], i32 [[VL:%.*]]) +// CHECK-RV32-NEXT: ret [[TMP0]] +// +// CHECK-RV64-LABEL: @test_vfneg_v_f64m8_m( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
<vscale x 8 x double> @llvm.riscv.vfsgnjn.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP1]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) {
+  return vfneg_v_f64m8_m(mask, maskedoff, op1, vl);
+}
+
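---

Usage note (not part of the patch): vfneg.v and vfabs.v are assembler pseudoinstructions in the V extension (vfneg.v vd, vs expands to vfsgnjn.vv vd, vs, vs, and vfabs.v vd, vs to vfsgnjx.vv vd, vs, vs), which is why RVVPseudoVFUnaryBuiltin duplicates op1 and reuses the existing vfsgnjn/vfsgnjx IR intrinsics instead of introducing new ones, as the CHECK lines above confirm. A minimal sketch of how the new overloads would be used in a strip-mined loop follows; it assumes the same pre-ratification (v0.10-era) intrinsic names the tests rely on (vsetvl_e32m1, vle32_v_f32m1, vse32_v_f32m1), and buf_fabs is an illustrative helper, not something defined by this patch.

#include <stddef.h>
#include <riscv_vector.h>

// Elementwise |x| over n floats; vl is renegotiated on every strip.
void buf_fabs(const float *in, float *out, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = vsetvl_e32m1(n - i);             // elements handled this pass
    vfloat32m1_t v = vle32_v_f32m1(in + i, vl);  // unit-stride load of a strip
    v = vfabs(v, vl);                            // lowers to vfsgnjx.vv v, v, v
    vse32_v_f32m1(out + i, v, vl);               // unit-stride store of the strip
    i += vl;
  }
}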