Index: llvm/lib/Target/RISCV/RISCVISelLowering.h
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -74,6 +74,8 @@
   bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
   bool isZExtFree(SDValue Val, EVT VT2) const override;
   bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
+  bool isFPImmLegal(const APFloat &Imm, EVT VT,
+                    bool ForCodeSize) const override;
   bool hasBitPreservingFPLogic(EVT VT) const override;
Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -336,6 +336,17 @@
   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
 }
 
+bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
+                                       bool ForCodeSize) const {
+  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
+    return false;
+  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
+    return false;
+  if (Imm.isNegZero())
+    return false;
+  return Imm.isZero();
+}
+
 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
   return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
          (VT == MVT::f64 && Subtarget.hasStdExtD());
Index: llvm/lib/Target/RISCV/RISCVInstrInfoD.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -339,6 +339,10 @@
 } // Predicates = [HasStdExtD]
 
 let Predicates = [HasStdExtD, IsRV32] in {
+
+/// Float constants
+def : Pat<(f64 (fpimm0)), (FCVT_D_W X0)>;
+
 // double->[u]int. Round-to-zero must be used.
 def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_W_D FPR64:$rs1, 0b001)>;
 def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_WU_D FPR64:$rs1, 0b001)>;
@@ -349,6 +353,10 @@
 } // Predicates = [HasStdExtD, IsRV32]
 
 let Predicates = [HasStdExtD, IsRV64] in {
+
+/// Float constants
+def : Pat<(f64 (fpimm0)), (FMV_D_X X0)>;
+
 def : Pat<(bitconvert GPR:$rs1), (FMV_D_X GPR:$rs1)>;
 def : Pat<(bitconvert FPR64:$rs1), (FMV_X_D FPR64:$rs1)>;
Index: llvm/lib/Target/RISCV/RISCVInstrInfoF.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -286,6 +286,9 @@
 // Pseudo-instructions and codegen patterns
 //===----------------------------------------------------------------------===//
 
+/// Floating point constants
+def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
+
 /// Generic pattern classes
 class PatFpr32Fpr32<SDPatternOperator OpNode, RVInstR Inst>
     : Pat<(OpNode FPR32:$rs1, FPR32:$rs2), (Inst $rs1, $rs2)>;
@@ -295,6 +298,9 @@
 
 let Predicates = [HasStdExtF] in {
 
+/// Float constants
+def : Pat<(f32 (fpimm0)), (FMV_W_X X0)>;
+
 /// Float conversion operations
 
 // Moves (no conversion)
Index: llvm/test/CodeGen/RISCV/double-arith.ll
===================================================================
--- llvm/test/CodeGen/RISCV/double-arith.ll
+++ llvm/test/CodeGen/RISCV/double-arith.ll
@@ -460,9 +460,7 @@
 ; RV32IFD-NEXT:    sw a4, 8(sp)
 ; RV32IFD-NEXT:    sw a5, 12(sp)
 ; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    lui a0, %hi(.LCPI15_0)
-; RV32IFD-NEXT:    addi a0, a0, %lo(.LCPI15_0)
-; RV32IFD-NEXT:    fld ft3, 0(a0)
+; RV32IFD-NEXT:    fcvt.d.w ft3, zero
 ; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
 ; RV32IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
 ; RV32IFD-NEXT:    fsd ft0, 8(sp)
@@ -473,14 +471,12 @@
 ;
 ; RV64IFD-LABEL: fmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    lui a3, %hi(.LCPI15_0)
-; RV64IFD-NEXT:    addi a3, a3, %lo(.LCPI15_0)
-; RV64IFD-NEXT:    fld ft0, 0(a3)
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a0
-; RV64IFD-NEXT:    fmv.d.x ft3, a2
-; RV64IFD-NEXT:    fadd.d ft0, ft3, ft0
-; RV64IFD-NEXT:    fmsub.d ft0, ft2, ft1, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fmv.d.x ft2, a2
+; RV64IFD-NEXT:    fmv.d.x ft3, zero
+; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
+; RV64IFD-NEXT:    fmsub.d ft0, ft1, ft0, ft2
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %c_ = fadd double 0.0, %c ; avoid negation using xor
@@ -502,9 +498,7 @@
 ; RV32IFD-NEXT:    sw a0, 8(sp)
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    lui a0, %hi(.LCPI16_0)
-; RV32IFD-NEXT:    addi a0, a0, %lo(.LCPI16_0)
-; RV32IFD-NEXT:    fld ft3, 0(a0)
+; RV32IFD-NEXT:    fcvt.d.w ft3, zero
 ; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
 ; RV32IFD-NEXT:    fadd.d ft1, ft1, ft3
 ; RV32IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
@@ -516,15 +510,13 @@
 ;
 ; RV64IFD-LABEL: fnmadd_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    lui a3, %hi(.LCPI16_0)
-; RV64IFD-NEXT:    addi a3, a3, %lo(.LCPI16_0)
-; RV64IFD-NEXT:    fld ft0, 0(a3)
-; RV64IFD-NEXT:    fmv.d.x ft1, a1
-; RV64IFD-NEXT:    fmv.d.x ft2, a2
-; RV64IFD-NEXT:    fmv.d.x ft3, a0
-; RV64IFD-NEXT:    fadd.d ft3, ft3, ft0
-; RV64IFD-NEXT:    fadd.d ft0, ft2, ft0
-; RV64IFD-NEXT:    fnmadd.d ft0, ft3, ft1, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a2
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fmv.d.x ft3, zero
+; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
+; RV64IFD-NEXT:    fadd.d ft1, ft1, ft3
+; RV64IFD-NEXT:    fnmadd.d ft0, ft2, ft0, ft1
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %a_ = fadd double 0.0, %a
@@ -548,9 +540,7 @@
 ; RV32IFD-NEXT:    sw a0, 8(sp)
 ; RV32IFD-NEXT:    sw a1, 12(sp)
 ; RV32IFD-NEXT:    fld ft2, 8(sp)
-; RV32IFD-NEXT:    lui a0, %hi(.LCPI17_0)
-; RV32IFD-NEXT:    addi a0, a0, %lo(.LCPI17_0)
-; RV32IFD-NEXT:    fld ft3, 0(a0)
+; RV32IFD-NEXT:    fcvt.d.w ft3, zero
 ; RV32IFD-NEXT:    fadd.d ft2, ft2, ft3
 ; RV32IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
 ; RV32IFD-NEXT:    fsd ft0, 8(sp)
@@ -561,14 +551,12 @@
 ;
 ; RV64IFD-LABEL: fnmsub_d:
 ; RV64IFD:       # %bb.0:
-; RV64IFD-NEXT:    lui a3, %hi(.LCPI17_0)
-; RV64IFD-NEXT:    addi a3, a3, %lo(.LCPI17_0)
-; RV64IFD-NEXT:    fld ft0, 0(a3)
-; RV64IFD-NEXT:    fmv.d.x ft1, a2
-; RV64IFD-NEXT:    fmv.d.x ft2, a1
-; RV64IFD-NEXT:    fmv.d.x ft3, a0
-; RV64IFD-NEXT:    fadd.d ft0, ft3, ft0
-; RV64IFD-NEXT:    fnmsub.d ft0, ft0, ft2, ft1
+; RV64IFD-NEXT:    fmv.d.x ft0, a2
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fmv.d.x ft2, a0
+; RV64IFD-NEXT:    fmv.d.x ft3, zero
+; RV64IFD-NEXT:    fadd.d ft2, ft2, ft3
+; RV64IFD-NEXT:    fnmsub.d ft0, ft2, ft1, ft0
 ; RV64IFD-NEXT:    fmv.x.d a0, ft0
 ; RV64IFD-NEXT:    ret
   %a_ = fadd double 0.0, %a
Index: llvm/test/CodeGen/RISCV/float-arith.ll
===================================================================
--- llvm/test/CodeGen/RISCV/float-arith.ll
+++ llvm/test/CodeGen/RISCV/float-arith.ll
@@ -339,27 +339,23 @@
 define float @fmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fmsub_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    lui a3, %hi(.LCPI15_0)
-; RV32IF-NEXT:    addi a3, a3, %lo(.LCPI15_0)
-; RV32IF-NEXT:    flw ft0, 0(a3)
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a0
-; RV32IF-NEXT:    fmv.w.x ft3, a2
-; RV32IF-NEXT:    fadd.s ft0, ft3, ft0
-; RV32IF-NEXT:    fmsub.s ft0, ft2, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, a1
+; RV32IF-NEXT:    fmv.w.x ft1, a0
+; RV32IF-NEXT:    fmv.w.x ft2, a2
+; RV32IF-NEXT:    fmv.w.x ft3, zero
+; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV32IF-NEXT:    fmsub.s ft0, ft1, ft0, ft2
 ; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fmsub_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    lui a3, %hi(.LCPI15_0)
-; RV64IF-NEXT:    addi a3, a3, %lo(.LCPI15_0)
-; RV64IF-NEXT:    flw ft0, 0(a3)
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a0
-; RV64IF-NEXT:    fmv.w.x ft3, a2
-; RV64IF-NEXT:    fadd.s ft0, ft3, ft0
-; RV64IF-NEXT:    fmsub.s ft0, ft2, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, a1
+; RV64IF-NEXT:    fmv.w.x ft1, a0
+; RV64IF-NEXT:    fmv.w.x ft2, a2
+; RV64IF-NEXT:    fmv.w.x ft3, zero
+; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV64IF-NEXT:    fmsub.s ft0, ft1, ft0, ft2
 ; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %c_ = fadd float 0.0, %c ; avoid negation using xor
@@ -371,29 +367,25 @@
 define float @fnmadd_s(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fnmadd_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    lui a3, %hi(.LCPI16_0)
-; RV32IF-NEXT:    addi a3, a3, %lo(.LCPI16_0)
-; RV32IF-NEXT:    flw ft0, 0(a3)
-; RV32IF-NEXT:    fmv.w.x ft1, a1
-; RV32IF-NEXT:    fmv.w.x ft2, a2
-; RV32IF-NEXT:    fmv.w.x ft3, a0
-; RV32IF-NEXT:    fadd.s ft3, ft3, ft0
-; RV32IF-NEXT:    fadd.s ft0, ft2, ft0
-; RV32IF-NEXT:    fnmadd.s ft0, ft3, ft1, ft0
+; RV32IF-NEXT:    fmv.w.x ft0, a1
+; RV32IF-NEXT:    fmv.w.x ft1, a2
+; RV32IF-NEXT:    fmv.w.x ft2, a0
+; RV32IF-NEXT:    fmv.w.x ft3, zero
+; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV32IF-NEXT:    fadd.s ft1, ft1, ft3
+; RV32IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
 ; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmadd_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    lui a3, %hi(.LCPI16_0)
-; RV64IF-NEXT:    addi a3, a3, %lo(.LCPI16_0)
-; RV64IF-NEXT:    flw ft0, 0(a3)
-; RV64IF-NEXT:    fmv.w.x ft1, a1
-; RV64IF-NEXT:    fmv.w.x ft2, a2
-; RV64IF-NEXT:    fmv.w.x ft3, a0
-; RV64IF-NEXT:    fadd.s ft3, ft3, ft0
-; RV64IF-NEXT:    fadd.s ft0, ft2, ft0
-; RV64IF-NEXT:    fnmadd.s ft0, ft3, ft1, ft0
+; RV64IF-NEXT:    fmv.w.x ft0, a1
+; RV64IF-NEXT:    fmv.w.x ft1, a2
+; RV64IF-NEXT:    fmv.w.x ft2, a0
+; RV64IF-NEXT:    fmv.w.x ft3, zero
+; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV64IF-NEXT:    fadd.s ft1, ft1, ft3
+; RV64IF-NEXT:    fnmadd.s ft0, ft2, ft0, ft1
 ; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %a_ = fadd float 0.0, %a
@@ -407,27 +399,23 @@
 define float @fnmsub_s(float %a, float %b, float %c) nounwind {
 ; RV32IF-LABEL: fnmsub_s:
 ; RV32IF:       # %bb.0:
-; RV32IF-NEXT:    lui a3, %hi(.LCPI17_0)
-; RV32IF-NEXT:    addi a3, a3, %lo(.LCPI17_0)
-; RV32IF-NEXT:    flw ft0, 0(a3)
-; RV32IF-NEXT:    fmv.w.x ft1, a2
-; RV32IF-NEXT:    fmv.w.x ft2, a1
-; RV32IF-NEXT:    fmv.w.x ft3, a0
-; RV32IF-NEXT:    fadd.s ft0, ft3, ft0
-; RV32IF-NEXT:    fnmsub.s ft0, ft0, ft2, ft1
+; RV32IF-NEXT:    fmv.w.x ft0, a2
+; RV32IF-NEXT:    fmv.w.x ft1, a1
+; RV32IF-NEXT:    fmv.w.x ft2, a0
+; RV32IF-NEXT:    fmv.w.x ft3, zero
+; RV32IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV32IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
 ; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fnmsub_s:
 ; RV64IF:       # %bb.0:
-; RV64IF-NEXT:    lui a3, %hi(.LCPI17_0)
-; RV64IF-NEXT:    addi a3, a3, %lo(.LCPI17_0)
-; RV64IF-NEXT:    flw ft0, 0(a3)
-; RV64IF-NEXT:    fmv.w.x ft1, a2
-; RV64IF-NEXT:    fmv.w.x ft2, a1
-; RV64IF-NEXT:    fmv.w.x ft3, a0
-; RV64IF-NEXT:    fadd.s ft0, ft3, ft0
-; RV64IF-NEXT:    fnmsub.s ft0, ft0, ft2, ft1
+; RV64IF-NEXT:    fmv.w.x ft0, a2
+; RV64IF-NEXT:    fmv.w.x ft1, a1
+; RV64IF-NEXT:    fmv.w.x ft2, a0
+; RV64IF-NEXT:    fmv.w.x ft3, zero
+; RV64IF-NEXT:    fadd.s ft2, ft2, ft3
+; RV64IF-NEXT:    fnmsub.s ft0, ft2, ft1, ft0
 ; RV64IF-NEXT:    fmv.x.w a0, ft0
 ; RV64IF-NEXT:    ret
   %a_ = fadd float 0.0, %a
Index: llvm/test/CodeGen/RISCV/float-br-fcmp.ll
===================================================================
--- llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -720,10 +720,8 @@
 ; RV32IF-NEXT:    sw ra, 12(sp)
 ; RV32IF-NEXT:    mv a0, zero
 ; RV32IF-NEXT:    call dummy
-; RV32IF-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32IF-NEXT:    addi a1, a1, %lo(.LCPI17_0)
-; RV32IF-NEXT:    flw ft1, 0(a1)
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
+; RV32IF-NEXT:    fmv.w.x ft1, zero
 ; RV32IF-NEXT:    fsw ft1, 8(sp)
 ; RV32IF-NEXT:    feq.s a0, ft0, ft1
 ; RV32IF-NEXT:    beqz a0, .LBB17_3
@@ -747,9 +745,7 @@
 ; RV64IF-NEXT:    addi sp, sp, -32
 ; RV64IF-NEXT:    sd ra, 24(sp)
 ; RV64IF-NEXT:    sd s0, 16(sp)
-; RV64IF-NEXT:    lui a0, %hi(.LCPI17_0)
-; RV64IF-NEXT:    addi a0, a0, %lo(.LCPI17_0)
-; RV64IF-NEXT:    flw ft0, 0(a0)
+; RV64IF-NEXT:    fmv.w.x ft0, zero
 ; RV64IF-NEXT:    fsw ft0, 12(sp)
 ; RV64IF-NEXT:    fmv.x.w s0, ft0
 ; RV64IF-NEXT:    mv a0, s0
Index: llvm/test/CodeGen/RISCV/fp-imm.ll
===================================================================
--- llvm/test/CodeGen/RISCV/fp-imm.ll
+++ llvm/test/CodeGen/RISCV/fp-imm.ll
@@ -11,30 +11,22 @@
 define float @f32_positive_zero(float *%pf) nounwind {
 ; RV32F-LABEL: f32_positive_zero:
 ; RV32F:       # %bb.0:
-; RV32F-NEXT:    lui a0, %hi(.LCPI0_0)
-; RV32F-NEXT:    addi a0, a0, %lo(.LCPI0_0)
-; RV32F-NEXT:    flw fa0, 0(a0)
+; RV32F-NEXT:    fmv.w.x fa0, zero
 ; RV32F-NEXT:    ret
 ;
 ; RV32D-LABEL: f32_positive_zero:
 ; RV32D:       # %bb.0:
-; RV32D-NEXT:    lui a0, %hi(.LCPI0_0)
-; RV32D-NEXT:    addi a0, a0, %lo(.LCPI0_0)
-; RV32D-NEXT:    flw fa0, 0(a0)
+; RV32D-NEXT:    fmv.w.x fa0, zero
 ; RV32D-NEXT:    ret
 ;
 ; RV64F-LABEL: f32_positive_zero:
 ; RV64F:       # %bb.0:
-; RV64F-NEXT:    lui a0, %hi(.LCPI0_0)
-; RV64F-NEXT:    addi a0, a0, %lo(.LCPI0_0)
-; RV64F-NEXT:    flw fa0, 0(a0)
+; RV64F-NEXT:    fmv.w.x fa0, zero
 ; RV64F-NEXT:    ret
 ;
 ; RV64D-LABEL: f32_positive_zero:
 ; RV64D:       # %bb.0:
-; RV64D-NEXT:    lui a0, %hi(.LCPI0_0)
-; RV64D-NEXT:    addi a0, a0, %lo(.LCPI0_0)
-; RV64D-NEXT:    flw fa0, 0(a0)
+; RV64D-NEXT:    fmv.w.x fa0, zero
 ; RV64D-NEXT:    ret
   ret float 0.0
 }
@@ -79,9 +71,7 @@
 ;
 ; RV32D-LABEL: f64_positive_zero:
 ; RV32D:       # %bb.0:
-; RV32D-NEXT:    lui a0, %hi(.LCPI2_0)
-; RV32D-NEXT:    addi a0, a0, %lo(.LCPI2_0)
-; RV32D-NEXT:    fld fa0, 0(a0)
+; RV32D-NEXT:    fcvt.d.w fa0, zero
 ; RV32D-NEXT:    ret
 ;
 ; RV64F-LABEL: f64_positive_zero:
 ; RV64F:       # %bb.0:
@@ -91,9 +81,7 @@
 ;
 ; RV64D-LABEL: f64_positive_zero:
 ; RV64D:       # %bb.0:
-; RV64D-NEXT:    lui a0, %hi(.LCPI2_0)
-; RV64D-NEXT:    addi a0, a0, %lo(.LCPI2_0)
-; RV64D-NEXT:    fld fa0, 0(a0)
+; RV64D-NEXT:    fmv.d.x fa0, zero
 ; RV64D-NEXT:    ret
   ret double 0.0
 }
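
Note on the chosen instructions: on RV64 (and for f32 everywhere), fmv.d.x/fmv.w.x
from x0 injects the all-zeroes bit pattern, i.e. +0.0, directly into an FPR. RV32
has no 64-bit GPR-to-FPR move, so the f64 case instead uses fcvt.d.w with x0 as
the source, which converts integer zero to +0.0 exactly. Since x0 can only supply
the all-zeroes pattern, isFPImmLegal deliberately rejects -0.0 (sign bit set),
which continues to be loaded from the constant pool.

An illustrative way to exercise the new patterns (the function name and RUN line
below are hypothetical and not part of this patch; they mirror the
f64_positive_zero case updated in fp-imm.ll above):

; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi=lp64d < %s | FileCheck %s

; With isFPImmLegal reporting +0.0 as legal, the ConstantFP node survives
; legalization and instruction selection matches the fpimm0 pattern,
; producing a single fmv.d.x from x0 rather than a lui/addi address
; sequence plus a constant-pool fld.
define double @pos_zero() nounwind {
; CHECK-LABEL: pos_zero:
; CHECK:       fmv.d.x fa0, zero
; CHECK-NEXT:  ret
  ret double 0.0
}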