diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -514,6 +514,14 @@
 defm "" : VPatBinaryFPSDNode_VV_VF<fdiv, "PseudoVFDIV">;
 defm "" : VPatBinaryFPSDNode_R_VF<fdiv, "PseudoVFRDIV">;
 
+// 14.10. Vector Floating-Point Sign-Injection Instructions
+// Handle fneg with VFSGNJN using the same input for both operands.
+foreach vti = AllFloatVectors in {
+  def : Pat<(fneg (vti.Vector vti.RegClass:$rs)),
+            (!cast<Instruction>("PseudoVFSGNJN_VV_"# vti.LMul.MX)
+                 vti.RegClass:$rs, vti.RegClass:$rs, vti.AVL, vti.SEW)>;
+}
+
 // 14.11. Vector Floating-Point Compare Instructions
 defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
 defm "" : VPatFPSetCCSDNode_VV_VF_FV<SETOEQ, "PseudoVMFEQ", "PseudoVMFEQ">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
@@ -0,0 +1,155 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 1 x half> %va
+  ret <vscale x 1 x half> %vb
+}
+
+define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 2 x half> %va
+  ret <vscale x 2 x half> %vb
+}
+
+define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 4 x half> %va
+  ret <vscale x 4 x half> %vb
+}
+
+define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 8 x half> %va
+  ret <vscale x 8 x half> %vb
+}
+
+define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 16 x half> %va
+  ret <vscale x 16 x half> %vb
+}
+
+define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va) {
+; CHECK-LABEL: vfneg_vv_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 32 x half> %va
+  ret <vscale x 32 x half> %vb
+}
+
+define <vscale x 1 x float> @vfneg_vv_nxv1f32(<vscale x 1 x float> %va) {
+; CHECK-LABEL: vfneg_vv_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 1 x float> %va
+  ret <vscale x 1 x float> %vb
+}
+
+define <vscale x 2 x float> @vfneg_vv_nxv2f32(<vscale x 2 x float> %va) {
+; CHECK-LABEL: vfneg_vv_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 2 x float> %va
+  ret <vscale x 2 x float> %vb
+}
+
+define <vscale x 4 x float> @vfneg_vv_nxv4f32(<vscale x 4 x float> %va) {
+; CHECK-LABEL: vfneg_vv_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 4 x float> %va
+  ret <vscale x 4 x float> %vb
+}
+
+define <vscale x 8 x float> @vfneg_vv_nxv8f32(<vscale x 8 x float> %va) {
+; CHECK-LABEL: vfneg_vv_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 8 x float> %va
+  ret <vscale x 8 x float> %vb
+}
+
+define <vscale x 16 x float> @vfneg_vv_nxv16f32(<vscale x 16 x float> %va) {
+; CHECK-LABEL: vfneg_vv_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 16 x float> %va
+  ret <vscale x 16 x float> %vb
+}
+
+define <vscale x 1 x double> @vfneg_vv_nxv1f64(<vscale x 1 x double> %va) {
+; CHECK-LABEL: vfneg_vv_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 1 x double> %va
+  ret <vscale x 1 x double> %vb
+}
+
+define <vscale x 2 x double> @vfneg_vv_nxv2f64(<vscale x 2 x double> %va) {
+; CHECK-LABEL: vfneg_vv_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 2 x double> %va
+  ret <vscale x 2 x double> %vb
+}
+
+define <vscale x 4 x double> @vfneg_vv_nxv4f64(<vscale x 4 x double> %va) {
+; CHECK-LABEL: vfneg_vv_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 4 x double> %va
+  ret <vscale x 4 x double> %vb
+}
+
+define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va) {
+; CHECK-LABEL: vfneg_vv_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v8
+; CHECK-NEXT:    ret
+  %vb = fneg <vscale x 8 x double> %va
+  ret <vscale x 8 x double> %vb
+}
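
For reference, the lowering this patch enables can also be exercised outside the lit test. A minimal sketch, assuming an llc built with this patch applied; the file name fneg.ll and the function name @neg are placeholders, and the flags mirror the riscv64 RUN line above:

  ; llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d fneg.ll -o -
  define <vscale x 2 x double> @neg(<vscale x 2 x double> %x) {
    ; With the new pattern this should lower to a single sign-injection:
    ;   vsetvli a0, zero, e64,m2,ta,mu
    ;   vfsgnjn.vv v8, v8, v8
    %r = fneg <vscale x 2 x double> %x
    ret <vscale x 2 x double> %r
  }

vfsgnjn.vv takes the magnitude from one source operand and the inverted sign bit from the other, so passing the same register for both operands flips only the sign bit. Since sign injection is a pure bit manipulation rather than an arithmetic operation, this matches fneg's sign-bit-flip semantics for all inputs, including NaNs.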