diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1305,8 +1305,6 @@
     return false;
   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
     return false;
-  if (Imm.isNegZero())
-    return false;
   return Imm.isZero();
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -285,6 +285,8 @@
 
 /// Float constants
 def : Pat<(f64 (fpimm0)), (FCVT_D_W (i32 X0))>;
+def : Pat<(f64 (fpimmneg0)), (FSGNJN_D (FCVT_D_W (i32 X0)),
+                                       (FCVT_D_W (i32 X0)))>;
 
 // double->[u]int. Round-to-zero must be used.
 def : Pat<(i32 (any_fp_to_sint FPR64:$rs1)), (FCVT_W_D FPR64:$rs1, 0b001)>;
@@ -309,6 +311,8 @@
 
 /// Float constants
 def : Pat<(f64 (fpimm0)), (FMV_D_X (i64 X0))>;
+def : Pat<(f64 (fpimmneg0)), (FSGNJN_D (FMV_D_X (i64 X0)),
+                                       (FMV_D_X (i64 X0)))>;
 
 // Moves (no conversion)
 def : Pat<(bitconvert (i64 GPR:$rs1)), (FMV_D_X GPR:$rs1)>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -320,7 +320,8 @@
 //===----------------------------------------------------------------------===//
 
 /// Floating point constants
-def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
+def fpimm0    : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
+def fpimmneg0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(-0.0); }]>;
 
 /// Generic pattern classes
 class PatSetCC
@@ -336,6 +337,7 @@
 
 /// Float constants
 def : Pat<(f32 (fpimm0)), (FMV_W_X X0)>;
+def : Pat<(f32 (fpimmneg0)), (FSGNJN_S (FMV_W_X X0), (FMV_W_X X0))>;
 
 /// Float conversion operations
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -201,6 +201,7 @@
 
 /// Float constants
 def : Pat<(f16 (fpimm0)), (FMV_H_X X0)>;
+def : Pat<(f16 (fpimmneg0)), (FSGNJN_H (FMV_H_X X0), (FMV_H_X X0))>;
 
 /// Float conversion operations
 
diff --git a/llvm/test/CodeGen/RISCV/fp-imm.ll b/llvm/test/CodeGen/RISCV/fp-imm.ll
--- a/llvm/test/CodeGen/RISCV/fp-imm.ll
+++ b/llvm/test/CodeGen/RISCV/fp-imm.ll
@@ -34,26 +34,26 @@
 define float @f32_negative_zero(float *%pf) nounwind {
 ; RV32F-LABEL: f32_negative_zero:
 ; RV32F:       # %bb.0:
-; RV32F-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32F-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
+; RV32F-NEXT:    fmv.w.x ft0, zero
+; RV32F-NEXT:    fneg.s fa0, ft0
 ; RV32F-NEXT:    ret
 ;
 ; RV32D-LABEL: f32_negative_zero:
 ; RV32D:       # %bb.0:
-; RV32D-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32D-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
+; RV32D-NEXT:    fmv.w.x ft0, zero
+; RV32D-NEXT:    fneg.s fa0, ft0
 ; RV32D-NEXT:    ret
 ;
 ; RV64F-LABEL: f32_negative_zero:
 ; RV64F:       # %bb.0:
-; RV64F-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64F-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
+; RV64F-NEXT:    fmv.w.x ft0, zero
+; RV64F-NEXT:    fneg.s fa0, ft0
 ; RV64F-NEXT:    ret
 ;
 ; RV64D-LABEL: f32_negative_zero:
 ; RV64D:       # %bb.0:
-; RV64D-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64D-NEXT:    flw fa0, %lo(.LCPI1_0)(a0)
+; RV64D-NEXT:    fmv.w.x ft0, zero
+; RV64D-NEXT:    fneg.s fa0, ft0
 ; RV64D-NEXT:    ret
   ret float -0.0
 }
@@ -91,8 +91,8 @@
 ;
 ; RV32D-LABEL: f64_negative_zero:
 ; RV32D:       # %bb.0:
-; RV32D-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV32D-NEXT:    fld fa0, %lo(.LCPI3_0)(a0)
+; RV32D-NEXT:    fcvt.d.w ft0, zero
+; RV32D-NEXT:    fneg.d fa0, ft0
 ; RV32D-NEXT:    ret
 ;
 ; RV64F-LABEL: f64_negative_zero:
@@ -103,8 +103,8 @@
 ;
 ; RV64D-LABEL: f64_negative_zero:
 ; RV64D:       # %bb.0:
-; RV64D-NEXT:    lui a0, %hi(.LCPI3_0)
-; RV64D-NEXT:    fld fa0, %lo(.LCPI3_0)(a0)
+; RV64D-NEXT:    fmv.d.x ft0, zero
+; RV64D-NEXT:    fneg.d fa0, ft0
 ; RV64D-NEXT:    ret
   ret double -0.0
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>)
 
@@ -38,11 +38,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI2_0)
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
@@ -73,11 +71,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI4_0)
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
@@ -108,11 +104,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI6_0)
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
@@ -143,11 +137,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_0)
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v10, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v10, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
@@ -179,10 +171,10 @@
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v12, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v12, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -218,10 +210,10 @@
 ; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI12_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v16, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v16, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -260,10 +252,10 @@
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle16.v v16, (a0)
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI14_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v16, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v16, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -334,11 +326,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI18_0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
@@ -369,11 +359,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI20_0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
@@ -404,11 +392,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI22_0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v10, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v10, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
@@ -439,11 +425,9 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI24_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI24_0)
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v12, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
+; CHECK-NEXT:    vfmv.s.f v12, ft0
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
@@ -475,10 +459,10 @@
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI26_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI26_0)
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v16, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v16, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -517,10 +501,10 @@
 ; CHECK-NEXT:    addi a0, a0, 128
 ; CHECK-NEXT:    vle32.v v16, (a0)
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    lui a0, %hi(.LCPI28_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI28_0)
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v16, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v16, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v16
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -587,19 +571,29 @@
 declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>)
 
 define double @vreduce_fadd_v2f64(<2 x double>* %x, double %s) {
-; CHECK-LABEL: vreduce_fadd_v2f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI32_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI32_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v9
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_v2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vfredusum.vs v8, v8, v9
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_v2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vfredusum.vs v8, v8, v9
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %v = load <2 x double>, <2 x double>* %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %v)
   ret double %red
@@ -622,19 +616,29 @@
 declare double @llvm.vector.reduce.fadd.v4f64(double, <4 x double>)
 
 define double @vreduce_fadd_v4f64(<4 x double>* %x, double %s) {
-; CHECK-LABEL: vreduce_fadd_v4f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI34_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI34_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v10
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_v4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vfmv.s.f v10, ft0
+; RV32-NEXT:    vfredusum.vs v8, v8, v10
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_v4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vfmv.s.f v10, ft0
+; RV64-NEXT:    vfredusum.vs v8, v8, v10
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %v = load <4 x double>, <4 x double>* %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %v)
   ret double %red
@@ -657,19 +661,29 @@
 declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>)
 
 define double @vreduce_fadd_v8f64(<8 x double>* %x, double %s) {
-; CHECK-LABEL: vreduce_fadd_v8f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI36_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v12
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_v8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vfmv.s.f v12, ft0
+; RV32-NEXT:    vfredusum.vs v8, v8, v12
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_v8f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vfmv.s.f v12, ft0
+; RV64-NEXT:    vfredusum.vs v8, v8, v12
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %v = load <8 x double>, <8 x double>* %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %v)
   ret double %red
@@ -692,19 +706,29 @@
 declare double @llvm.vector.reduce.fadd.v16f64(double, <16 x double>)
 
 define double @vreduce_fadd_v16f64(<16 x double>* %x, double %s) {
-; CHECK-LABEL: vreduce_fadd_v16f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI38_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v16
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_v16f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vfmv.s.f v16, ft0
+; RV32-NEXT:    vfredusum.vs v8, v8, v16
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_v16f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vfmv.s.f v16, ft0
+; RV64-NEXT:    vfredusum.vs v8, v8, v16
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %v = load <16 x double>, <16 x double>* %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %v)
   ret double %red
@@ -727,22 +751,35 @@
 declare double @llvm.vector.reduce.fadd.v32f64(double, <32 x double>)
 
 define double @vreduce_fadd_v32f64(<32 x double>* %x, double %s) {
-; CHECK-LABEL: vreduce_fadd_v32f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    addi a0, a0, 128
-; CHECK-NEXT:    vle64.v v16, (a0)
-; CHECK-NEXT:    vfadd.vv v8, v8, v16
-; CHECK-NEXT:    lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI40_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v16
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_v32f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT:    vle64.v v8, (a0)
+; RV32-NEXT:    addi a0, a0, 128
+; RV32-NEXT:    vle64.v v16, (a0)
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vfmv.s.f v24, ft0
+; RV32-NEXT:    vfadd.vv v8, v8, v16
+; RV32-NEXT:    vfredusum.vs v8, v8, v24
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_v32f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT:    vle64.v v8, (a0)
+; RV64-NEXT:    addi a0, a0, 128
+; RV64-NEXT:    vle64.v v16, (a0)
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vfmv.s.f v24, ft0
+; RV64-NEXT:    vfadd.vv v8, v8, v16
+; RV64-NEXT:    vfredusum.vs v8, v8, v24
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %v = load <32 x double>, <32 x double>* %x
   %red = call reassoc double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %v)
   ret double %red
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -1,18 +1,18 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare half @llvm.vector.reduce.fadd.nxv1f16(half, <vscale x 1 x half>)
 
 define half @vreduce_fadd_nxv1f16(<vscale x 1 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI0_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -40,10 +40,10 @@
 define half @vreduce_fadd_nxv2f16(<vscale x 2 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI2_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -71,10 +71,10 @@
 define half @vreduce_fadd_nxv4f16(<vscale x 4 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI4_0)
+; CHECK-NEXT:    fmv.h.x ft0, zero
+; CHECK-NEXT:    fneg.h ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v9, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -102,10 +102,10 @@
 define float @vreduce_fadd_nxv1f32(<vscale x 1 x float> %v, float %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI6_0)
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v9, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -133,10 +133,10 @@
 define float @vreduce_fadd_nxv2f32(<vscale x 2 x float> %v, float %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v9, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v9, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -164,10 +164,10 @@
 define float @vreduce_fadd_nxv4f32(<vscale x 4 x float> %v, float %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI10_0)
+; CHECK-NEXT:    fmv.w.x ft0, zero
+; CHECK-NEXT:    fneg.s ft0, ft0
 ; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v10, (a0), zero
+; CHECK-NEXT:    vfmv.s.f v10, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfredusum.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s ft0, v8
@@ -193,17 +193,29 @@
 declare double @llvm.vector.reduce.fadd.nxv1f64(double, <vscale x 1 x double>)
 
 define double @vreduce_fadd_nxv1f64(<vscale x 1 x double> %v, double %s) {
-; CHECK-LABEL: vreduce_fadd_nxv1f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI12_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v9
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_nxv1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vfmv.s.f v9, ft0
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vfredusum.vs v8, v8, v9
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_nxv1f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vfmv.s.f v9, ft0
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT:    vfredusum.vs v8, v8, v9
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %red = call reassoc double @llvm.vector.reduce.fadd.nxv1f64(double %s, <vscale x 1 x double> %v)
   ret double %red
 }
@@ -224,17 +236,29 @@
 declare double @llvm.vector.reduce.fadd.nxv2f64(double, <vscale x 2 x double>)
 
 define double @vreduce_fadd_nxv2f64(<vscale x 2 x double> %v, double %s) {
-; CHECK-LABEL: vreduce_fadd_nxv2f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI14_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v10
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_nxv2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vfmv.s.f v10, ft0
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vfredusum.vs v8, v8, v10
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_nxv2f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vfmv.s.f v10, ft0
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vfredusum.vs v8, v8, v10
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %red = call reassoc double @llvm.vector.reduce.fadd.nxv2f64(double %s, <vscale x 2 x double> %v)
   ret double %red
 }
@@ -255,17 +279,29 @@
 declare double @llvm.vector.reduce.fadd.nxv4f64(double, <vscale x 4 x double>)
 
 define double @vreduce_fadd_nxv4f64(<vscale x 4 x double> %v, double %s) {
-; CHECK-LABEL: vreduce_fadd_nxv4f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI16_0)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI16_0)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vfredusum.vs v8, v8, v12
-; CHECK-NEXT:    vfmv.f.s ft0, v8
-; CHECK-NEXT:    fadd.d fa0, fa0, ft0
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_fadd_nxv4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fcvt.d.w ft0, zero
+; RV32-NEXT:    fneg.d ft0, ft0
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vfmv.s.f v12, ft0
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT:    vfredusum.vs v8, v8, v12
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fadd.d fa0, fa0, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_fadd_nxv4f64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    fmv.d.x ft0, zero
+; RV64-NEXT:    fneg.d ft0, ft0
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vfmv.s.f v12, ft0
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT:    vfredusum.vs v8, v8, v12
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fadd.d fa0, fa0, ft0
+; RV64-NEXT:    ret
   %red = call reassoc double @llvm.vector.reduce.fadd.nxv4f64(double %s, <vscale x 4 x double> %v)
   ret double %red
 }
diff --git a/llvm/test/CodeGen/RISCV/zfh-imm.ll b/llvm/test/CodeGen/RISCV/zfh-imm.ll
--- a/llvm/test/CodeGen/RISCV/zfh-imm.ll
+++ b/llvm/test/CodeGen/RISCV/zfh-imm.ll
@@ -34,26 +34,26 @@
 define half @f16_negative_zero(half *%pf) nounwind {
 ; RV32IZFH-LABEL: f16_negative_zero:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32IZFH-NEXT:    flh fa0, %lo(.LCPI1_0)(a0)
+; RV32IZFH-NEXT:    fmv.h.x ft0, zero
+; RV32IZFH-NEXT:    fneg.h fa0, ft0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: f16_negative_zero:
 ; RV32IDZFH:       # %bb.0:
-; RV32IDZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV32IDZFH-NEXT:    flh fa0, %lo(.LCPI1_0)(a0)
+; RV32IDZFH-NEXT:    fmv.h.x ft0, zero
+; RV32IDZFH-NEXT:    fneg.h fa0, ft0
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: f16_negative_zero:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64IZFH-NEXT:    flh fa0, %lo(.LCPI1_0)(a0)
+; RV64IZFH-NEXT:    fmv.h.x ft0, zero
+; RV64IZFH-NEXT:    fneg.h fa0, ft0
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: f16_negative_zero:
 ; RV64IDZFH:       # %bb.0:
-; RV64IDZFH-NEXT:    lui a0, %hi(.LCPI1_0)
-; RV64IDZFH-NEXT:    flh fa0, %lo(.LCPI1_0)(a0)
+; RV64IDZFH-NEXT:    fmv.h.x ft0, zero
+; RV64IDZFH-NEXT:    fneg.h fa0, ft0
 ; RV64IDZFH-NEXT:    ret
   ret half -0.0
 }