diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -88,6 +88,7 @@
   bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
   bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal);
   bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal);
+  bool selectFPImm(SDValue N, SDValue &Imm);
   bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm);
   template <unsigned Width> bool selectRVVSimm5(SDValue N, SDValue &Imm) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -709,6 +709,38 @@
     ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
     return;
   }
+  case ISD::ConstantFP: {
+    unsigned BitSize = VT.getSizeInBits().getFixedValue();
+    const APFloat &APF = cast<ConstantFPSDNode>(Node)->getValueAPF();
+    // td can handle +0.0 already.
+    if (APF.isPosZero())
+      break;
+    // Special case: a 64 bit -0.0 uses more instructions than fmv + fneg.
+    if (APF.isNegZero() && BitSize == 64)
+      break;
+    assert((BitSize <= Subtarget->getXLen()) &&
+           "Cannot create a 64 bit floating-point immediate value for rv32");
+    SDValue Imm =
+        SDValue(selectImm(CurDAG, DL, XLenVT,
+                          APF.bitcastToAPInt().getSExtValue(), *Subtarget),
+                0);
+    unsigned Opc;
+    switch (BitSize) {
+    default:
+      llvm_unreachable("Unexpected size");
+    case 16:
+      Opc = RISCV::FMV_H_X;
+      break;
+    case 32:
+      Opc = RISCV::FMV_W_X;
+      break;
+    case 64:
+      Opc = RISCV::FMV_D_X;
+      break;
+    }
+    ReplaceNode(Node, CurDAG->getMachineNode(Opc, DL, VT, Imm));
+    return;
+  }
   case ISD::SHL: {
     auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
     if (!N1C)
@@ -2554,6 +2586,22 @@
   return true;
 }
 
+bool RISCVDAGToDAGISel::selectFPImm(SDValue N, SDValue &Imm) {
+  ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N.getNode());
+  if (!CFP)
+    return false;
+  const APFloat &APF = CFP->getValueAPF();
+  // td can handle +0.0 already.
+  if (APF.isPosZero())
+    return false;
+  SDLoc DL(N);
+  MVT XLenVT = Subtarget->getXLenVT();
+  Imm = SDValue(selectImm(CurDAG, DL, XLenVT,
+                          APF.bitcastToAPInt().getSExtValue(), *Subtarget),
+                0);
+  return true;
+}
+
 bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
                                        SDValue &Imm) {
   if (auto *C = dyn_cast<ConstantSDNode>(N)) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -65,6 +65,12 @@
            "transformation to multiplications by the reciprocal"),
     cl::init(2));
 
+static cl::opt<int>
+    FPImmCost(DEBUG_TYPE "-fpimm-cost", cl::Hidden,
+              cl::desc("Give the maximum number of instructions that we will "
+                       "use for creating a floating-point immediate value"),
+              cl::init(2));
+
 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                          const RISCVSubtarget &STI)
     : TargetLowering(TM), Subtarget(STI) {
@@ -1456,7 +1462,20 @@
     return false;
   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
     return false;
-  return Imm.isZero();
+  // Cannot create a 64 bit floating-point immediate value for rv32.
+  if (Subtarget.getXLen() < VT.getScalarSizeInBits()) {
+    // td can handle +0.0 or -0.0 already.
+    // -0.0 can be created by fmv + fneg.
+    return Imm.isZero();
+  }
+  // Special case: the cost for -0.0 is 1.
+  int Cost = Imm.isNegZero()
+                 ? 1
+                 : RISCVMatInt::getIntMatCost(Imm.bitcastToAPInt(),
+                                              Subtarget.getXLen(),
+                                              Subtarget.getFeatureBits());
+  // If the constant-pool data is already in cache, only a cost of 1 is cheaper than the load.
+  return Cost < FPImmCost;
 }
 
 // TODO: This is very conservative.
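Taken together, the changes above implement a single policy: bitcast the FP immediate to its integer image, ask the integer materializer what that image costs, and bypass the constant pool only when the sequence is short enough (Cost < FPImmCost, i.e. a single instruction at the default of 2; with DEBUG_TYPE "riscv-lower" the knob should spell -riscv-lower-fpimm-cost). Below is a minimal standalone sketch of that decision for f32 on rv32 — an illustration, not the patch code: intMatCost32 is a hypothetical stand-in for RISCVMatInt::getIntMatCost that only knows li, lui, and lui+addi.

// Standalone model of the new FP-immediate policy. Assumptions: f32 on rv32
// only; intMatCost32 is a simplified stand-in for RISCVMatInt::getIntMatCost.
#include <cstdint>
#include <cstdio>
#include <cstring>

// Instructions needed to build Bits in a GPR with the base ISA: one addi
// from x0 for a simm12, one lui when the low 12 bits are zero, else lui+addi.
static int intMatCost32(uint32_t Bits) {
  int32_t SVal = static_cast<int32_t>(Bits);
  if (SVal >= -2048 && SVal < 2048)
    return 1; // addi rd, zero, simm12
  if ((Bits & 0xfff) == 0)
    return 1; // lui rd, Bits >> 12
  return 2;   // lui + addi
}

int main() {
  const int FPImmCost = 2; // mirrors cl::init(2) above
  const float Tests[] = {1.0f, -0.0f, 8388608.0f /* 2^23 */, 2147483520.0f};
  for (float F : Tests) {
    uint32_t Bits;
    std::memcpy(&Bits, &F, sizeof(Bits)); // APF.bitcastToAPInt()
    if (intMatCost32(Bits) < FPImmCost)   // the new isFPImmLegal test
      std::printf("%g: lui a0, %u; fmv.w.x ft0, a0\n", F, Bits >> 12);
    else
      std::printf("%g: keep the constant-pool load\n", F);
  }
}

This reproduces the numbers in the updated CHECK lines: 1.0f (0x3f800000), -0.0f (0x80000000), and 2^23 (0x4b000000) are single-lui patterns and become lui 260096/524288/307200 plus fmv.w.x, while a saturation bound such as 2147483520.0f (0x4effffff) has nonzero low bits, would need lui+addi, and therefore keeps its .LCPI load — which is why some .LCPI entries in the tests below disappear while others are merely renumbered.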
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -405,6 +405,8 @@
 def SplatPat_simm5_plus1_nonzero
     : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 3>;
 
+def SelectFPImm : ComplexPattern<fAny, 1, "selectFPImm", [], [], 1>;
+
 // Ignore the vl operand.
 def SplatFPOp : PatFrag<(ops node:$op),
                         (riscv_vfmv_v_f_vl undef, node:$op, srcvalue)>;
@@ -1736,6 +1738,15 @@
                  (fvti.Scalar fvti.ScalarRegClass:$rs1),
                  (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
+  def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
+                                           (SplatFPOp (SelectFPImm (XLenVT GPR:$imm))),
+                                           fvti.RegClass:$rs2,
+                                           VLOpFrag)),
+            (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
+                 fvti.RegClass:$rs2,
+                 GPR:$imm,
+                 (fvti.Mask V0), GPR:$vl, fvti.Log2SEW)>;
+
   def : Pat<(fvti.Vector (riscv_vselect_vl (fvti.Mask V0),
                                            (SplatFPOp (fvti.Scalar fpimm0)),
                                            fvti.RegClass:$rs2,
                                            VLOpFrag)),
@@ -1778,6 +1789,14 @@
                  fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)),
             (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX#"_TU")
                  $passthru, 0, GPR:$vl, fvti.Log2SEW)>;
+  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
+                 (fvti.Vector undef), (fvti.Scalar (SelectFPImm (XLenVT GPR:$imm))), VLOpFrag)),
+            (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
+                 GPR:$imm, GPR:$vl, fvti.Log2SEW)>;
+  def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
+                 fvti.Vector:$passthru, (fvti.Scalar (SelectFPImm (XLenVT GPR:$imm))), VLOpFrag)),
+            (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX#"_TU")
+                 $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW)>;
   def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                  (fvti.Vector undef), (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
@@ -2055,6 +2074,11 @@
                                              VLOpFrag)),
             (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
                 vti.RegClass:$merge, X0, GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
+                                            (vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
+                                            VLOpFrag)),
+            (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
+                 vti.RegClass:$merge, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
   def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
                                             vti.ScalarRegClass:$rs1,
                                             VLOpFrag)),
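The vector patterns above extend the same trick to masked selects, splats, and scalar moves: whenever SelectFPImm can hand the immediate's bit image to a GPR, the integer forms (vmerge.vxm, vmv.v.x, vmv.s.x) are used directly, skipping the fmv into an FPR that vfmerge.vfm/vfmv.v.f would require. That is sound because a lane written from a GPR carries exactly the IEEE-754 bits of the scalar; +0.0 is deliberately excluded (selectFPImm returns false) since the existing fpimm0 patterns, visible in the context above, already emit vmv.v.i/X0. A host-side sketch of the bit-identity argument — hypothetical helper name, not an LLVM API:

// Why PseudoVMV_V_X is a valid lowering for an FP splat: filling lanes with
// the immediate's integer image is bit-identical to splatting the float.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

// What selectFPImm materializes into a GPR for an f32 immediate (sketch).
static uint32_t fpImmAsGPR(float F) {
  uint32_t Bits;
  std::memcpy(&Bits, &F, sizeof(Bits));
  return Bits;
}

int main() {
  const float Imm = -3.0f; // any immediate other than +0.0
  uint32_t IntLanes[4];    // "vmv.v.x v8, a0"
  float FPLanes[4];        // "vfmv.v.f v8, fa0"
  for (int I = 0; I < 4; ++I) {
    IntLanes[I] = fpImmAsGPR(Imm);
    FPLanes[I] = Imm;
  }
  assert(std::memcmp(IntLanes, FPLanes, sizeof(IntLanes)) == 0);
  std::printf("every lane holds 0x%08x (%g)\n", IntLanes[0], FPLanes[0]);
}

The merge form is the clearest win: vmerge.vxm consumes the GPR operand directly, so the FPR round-trip disappears entirely.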
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -163,8 +163,8 @@
 ; RV64IF: # %bb.0:
 ; RV64IF-NEXT: addi sp, sp, -16
 ; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IF-NEXT: lui a0, %hi(.LCPI1_0)
-; RV64IF-NEXT: flw ft0, %lo(.LCPI1_0)(a0)
+; RV64IF-NEXT: lui a0, 1048564
+; RV64IF-NEXT: fmv.w.x ft0, a0
 ; RV64IF-NEXT: fmv.x.w a1, ft0
 ; RV64IF-NEXT: li a0, 1
 ; RV64IF-NEXT: call callee_half_in_regs@plt
@@ -176,8 +176,8 @@
 ; RV32-ILP32F: # %bb.0:
 ; RV32-ILP32F-NEXT: addi sp, sp, -16
 ; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32F-NEXT: lui a0, %hi(.LCPI1_0)
-; RV32-ILP32F-NEXT: flw fa0, %lo(.LCPI1_0)(a0)
+; RV32-ILP32F-NEXT: lui a0, 1048564
+; RV32-ILP32F-NEXT: fmv.w.x fa0, a0
 ; RV32-ILP32F-NEXT: li a0, 1
 ; RV32-ILP32F-NEXT: call callee_half_in_regs@plt
 ; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -188,8 +188,8 @@
 ; RV64-LP64F: # %bb.0:
 ; RV64-LP64F-NEXT: addi sp, sp, -16
 ; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64F-NEXT: lui a0, %hi(.LCPI1_0)
-; RV64-LP64F-NEXT: flw fa0, %lo(.LCPI1_0)(a0)
+; RV64-LP64F-NEXT: lui a0, 1048564
+; RV64-LP64F-NEXT: fmv.w.x fa0, a0
 ; RV64-LP64F-NEXT: li a0, 1
 ; RV64-LP64F-NEXT: call callee_half_in_regs@plt
 ; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -200,8 +200,8 @@
 ; RV32-ILP32ZFHMIN: # %bb.0:
 ; RV32-ILP32ZFHMIN-NEXT: addi sp, sp, -16
 ; RV32-ILP32ZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32ZFHMIN-NEXT: lui a0, %hi(.LCPI1_0)
-; RV32-ILP32ZFHMIN-NEXT: flh fa0, %lo(.LCPI1_0)(a0)
+; RV32-ILP32ZFHMIN-NEXT: lui a0, 4
+; RV32-ILP32ZFHMIN-NEXT: fmv.h.x fa0, a0
 ; RV32-ILP32ZFHMIN-NEXT: li a0, 1
 ; RV32-ILP32ZFHMIN-NEXT: call callee_half_in_regs@plt
 ; RV32-ILP32ZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -212,8 +212,8 @@
 ; RV64-LP64ZFHMIN: # %bb.0:
 ; RV64-LP64ZFHMIN-NEXT: addi sp, sp, -16
 ; RV64-LP64ZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64ZFHMIN-NEXT: lui a0, %hi(.LCPI1_0)
-; RV64-LP64ZFHMIN-NEXT: flh fa0, %lo(.LCPI1_0)(a0)
+; RV64-LP64ZFHMIN-NEXT: lui a0, 4
+; RV64-LP64ZFHMIN-NEXT: fmv.h.x fa0, a0
 ; RV64-LP64ZFHMIN-NEXT: li a0, 1
 ; RV64-LP64ZFHMIN-NEXT: call callee_half_in_regs@plt
 ; RV64-LP64ZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
@@ -25,8 +25,8 @@
 ; RV32-ILP32FD: # %bb.0:
 ; RV32-ILP32FD-NEXT: addi sp, sp, -16
 ; RV32-ILP32FD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI1_0)
-; RV32-ILP32FD-NEXT: flw fa0, %lo(.LCPI1_0)(a0)
+; RV32-ILP32FD-NEXT: lui a0, 262144
+; RV32-ILP32FD-NEXT: fmv.w.x fa0, a0
 ; RV32-ILP32FD-NEXT: li a0, 1
 ; RV32-ILP32FD-NEXT: call callee_float_in_fpr@plt
 ; RV32-ILP32FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -55,8 +55,8 @@
 ; RV32-ILP32FD-NEXT: addi sp, sp, -16
 ; RV32-ILP32FD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32-ILP32FD-NEXT: li a1, 5
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32-ILP32FD-NEXT: flw fa0, %lo(.LCPI3_0)(a0)
+; RV32-ILP32FD-NEXT: lui a0, 265216
+; RV32-ILP32FD-NEXT: fmv.w.x fa0, a0
 ; RV32-ILP32FD-NEXT: li a0, 1
 ; RV32-ILP32FD-NEXT: li a2, 2
 ; RV32-ILP32FD-NEXT: li a4, 3
@@ -95,22 +95,22 @@
 ; RV32-ILP32FD: # %bb.0:
 ; RV32-ILP32FD-NEXT: addi sp, sp, -16
 ; RV32-ILP32FD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_0)
-; RV32-ILP32FD-NEXT: flw fa0, %lo(.LCPI5_0)(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_1)
-; RV32-ILP32FD-NEXT: flw fa1, %lo(.LCPI5_1)(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_2)
-; RV32-ILP32FD-NEXT: flw fa2, %lo(.LCPI5_2)(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_3)
-; RV32-ILP32FD-NEXT: flw fa3, %lo(.LCPI5_3)(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_4)
-; RV32-ILP32FD-NEXT: flw fa4, %lo(.LCPI5_4)(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_5)
-; RV32-ILP32FD-NEXT: flw fa5, %lo(.LCPI5_5)(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_6)
-; RV32-ILP32FD-NEXT: flw fa6, %lo(.LCPI5_6)(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_7)
-; RV32-ILP32FD-NEXT: flw fa7, %lo(.LCPI5_7)(a0)
+; RV32-ILP32FD-NEXT: lui a0, 260096
+; RV32-ILP32FD-NEXT: fmv.w.x fa0, a0
+; RV32-ILP32FD-NEXT: lui a0, 262144
+; RV32-ILP32FD-NEXT: fmv.w.x fa1, a0
+; RV32-ILP32FD-NEXT: lui a0, 263168
+; RV32-ILP32FD-NEXT: fmv.w.x fa2, a0
+; RV32-ILP32FD-NEXT: lui a0, 264192
+; RV32-ILP32FD-NEXT: fmv.w.x fa3, a0
+; RV32-ILP32FD-NEXT: lui a0, 264704
+; 
RV32-ILP32FD-NEXT: fmv.w.x fa4, a0 +; RV32-ILP32FD-NEXT: lui a0, 265216 +; RV32-ILP32FD-NEXT: fmv.w.x fa5, a0 +; RV32-ILP32FD-NEXT: lui a0, 265728 +; RV32-ILP32FD-NEXT: fmv.w.x fa6, a0 +; RV32-ILP32FD-NEXT: lui a0, 266240 +; RV32-ILP32FD-NEXT: fmv.w.x fa7, a0 ; RV32-ILP32FD-NEXT: lui a0, 266496 ; RV32-ILP32FD-NEXT: call callee_float_in_gpr_exhausted_fprs@plt ; RV32-ILP32FD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -142,22 +142,22 @@ ; RV32-ILP32FD-NEXT: addi sp, sp, -16 ; RV32-ILP32FD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32-ILP32FD-NEXT: lui a1, 267520 -; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_0) -; RV32-ILP32FD-NEXT: flw fa0, %lo(.LCPI7_0)(a0) -; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_1) -; RV32-ILP32FD-NEXT: flw fa1, %lo(.LCPI7_1)(a0) -; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_2) -; RV32-ILP32FD-NEXT: flw fa2, %lo(.LCPI7_2)(a0) -; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_3) -; RV32-ILP32FD-NEXT: flw fa3, %lo(.LCPI7_3)(a0) -; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_4) -; RV32-ILP32FD-NEXT: flw fa4, %lo(.LCPI7_4)(a0) -; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_5) -; RV32-ILP32FD-NEXT: flw fa5, %lo(.LCPI7_5)(a0) -; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_6) -; RV32-ILP32FD-NEXT: flw fa6, %lo(.LCPI7_6)(a0) -; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_7) -; RV32-ILP32FD-NEXT: flw fa7, %lo(.LCPI7_7)(a0) +; RV32-ILP32FD-NEXT: lui a0, 262144 +; RV32-ILP32FD-NEXT: fmv.w.x fa0, a0 +; RV32-ILP32FD-NEXT: lui a0, 264192 +; RV32-ILP32FD-NEXT: fmv.w.x fa1, a0 +; RV32-ILP32FD-NEXT: lui a0, 265216 +; RV32-ILP32FD-NEXT: fmv.w.x fa2, a0 +; RV32-ILP32FD-NEXT: lui a0, 266240 +; RV32-ILP32FD-NEXT: fmv.w.x fa3, a0 +; RV32-ILP32FD-NEXT: lui a0, 266496 +; RV32-ILP32FD-NEXT: fmv.w.x fa4, a0 +; RV32-ILP32FD-NEXT: lui a0, 266752 +; RV32-ILP32FD-NEXT: fmv.w.x fa5, a0 +; RV32-ILP32FD-NEXT: lui a0, 267008 +; RV32-ILP32FD-NEXT: fmv.w.x fa6, a0 +; RV32-ILP32FD-NEXT: lui a0, 267264 +; RV32-ILP32FD-NEXT: fmv.w.x fa7, a0 ; RV32-ILP32FD-NEXT: li a0, 1 ; RV32-ILP32FD-NEXT: li a2, 3 ; RV32-ILP32FD-NEXT: li a4, 5 @@ -180,8 +180,8 @@ define float @callee_float_ret() nounwind { ; RV32-ILP32FD-LABEL: callee_float_ret: ; RV32-ILP32FD: # %bb.0: -; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI8_0) -; RV32-ILP32FD-NEXT: flw fa0, %lo(.LCPI8_0)(a0) +; RV32-ILP32FD-NEXT: lui a0, 260096 +; RV32-ILP32FD-NEXT: fmv.w.x fa0, a0 ; RV32-ILP32FD-NEXT: ret ret float 1.0 } diff --git a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll --- a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll +++ b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll @@ -124,16 +124,15 @@ define float @lower_constantpool(float %a) nounwind { ; RV32I-SMALL-LABEL: lower_constantpool: ; RV32I-SMALL: # %bb.0: -; RV32I-SMALL-NEXT: lui a0, %hi(.LCPI3_0) -; RV32I-SMALL-NEXT: flw ft0, %lo(.LCPI3_0)(a0) +; RV32I-SMALL-NEXT: lui a0, 260096 +; RV32I-SMALL-NEXT: fmv.w.x ft0, a0 ; RV32I-SMALL-NEXT: fadd.s fa0, fa0, ft0 ; RV32I-SMALL-NEXT: ret ; ; RV32I-MEDIUM-LABEL: lower_constantpool: ; RV32I-MEDIUM: # %bb.0: -; RV32I-MEDIUM-NEXT: .Lpcrel_hi3: -; RV32I-MEDIUM-NEXT: auipc a0, %pcrel_hi(.LCPI3_0) -; RV32I-MEDIUM-NEXT: flw ft0, %pcrel_lo(.Lpcrel_hi3)(a0) +; RV32I-MEDIUM-NEXT: lui a0, 260096 +; RV32I-MEDIUM-NEXT: fmv.w.x ft0, a0 ; RV32I-MEDIUM-NEXT: fadd.s fa0, fa0, ft0 ; RV32I-MEDIUM-NEXT: ret %1 = fadd float %a, 1.0 diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -521,9 +521,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte 
Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI12_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI12_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 913408 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fle.s s0, ft0, fa0 ; RV32IF-NEXT: call __fixsfdi@plt ; RV32IF-NEXT: lui a3, 524288 @@ -531,8 +531,8 @@ ; RV32IF-NEXT: # %bb.1: # %start ; RV32IF-NEXT: lui a1, 524288 ; RV32IF-NEXT: .LBB12_2: # %start -; RV32IF-NEXT: lui a2, %hi(.LCPI12_1) -; RV32IF-NEXT: flw ft0, %lo(.LCPI12_1)(a2) +; RV32IF-NEXT: lui a2, %hi(.LCPI12_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI12_0)(a2) ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: beqz a2, .LBB12_4 ; RV32IF-NEXT: # %bb.3: @@ -1154,10 +1154,10 @@ ; RV32IF: # %bb.0: # %start ; RV32IF-NEXT: lui a0, %hi(.LCPI24_0) ; RV32IF-NEXT: flw ft0, %lo(.LCPI24_0)(a0) -; RV32IF-NEXT: lui a0, %hi(.LCPI24_1) -; RV32IF-NEXT: flw ft1, %lo(.LCPI24_1)(a0) -; RV32IF-NEXT: fmax.s ft0, fa0, ft0 -; RV32IF-NEXT: fmin.s ft0, ft0, ft1 +; RV32IF-NEXT: lui a0, 815104 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fmax.s ft1, fa0, ft1 +; RV32IF-NEXT: fmin.s ft0, ft1, ft0 ; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz ; RV32IF-NEXT: feq.s a1, fa0, fa0 ; RV32IF-NEXT: seqz a1, a1 @@ -1169,10 +1169,10 @@ ; RV64IF: # %bb.0: # %start ; RV64IF-NEXT: lui a0, %hi(.LCPI24_0) ; RV64IF-NEXT: flw ft0, %lo(.LCPI24_0)(a0) -; RV64IF-NEXT: lui a0, %hi(.LCPI24_1) -; RV64IF-NEXT: flw ft1, %lo(.LCPI24_1)(a0) -; RV64IF-NEXT: fmax.s ft0, fa0, ft0 -; RV64IF-NEXT: fmin.s ft0, ft0, ft1 +; RV64IF-NEXT: lui a0, 815104 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fmax.s ft1, fa0, ft1 +; RV64IF-NEXT: fmin.s ft0, ft1, ft0 ; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz ; RV64IF-NEXT: feq.s a1, fa0, fa0 ; RV64IF-NEXT: seqz a1, a1 @@ -1436,11 +1436,11 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind { ; RV32IF-LABEL: fcvt_w_s_sat_i8: ; RV32IF: # %bb.0: # %start -; RV32IF-NEXT: lui a0, %hi(.LCPI28_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI28_0)(a0) -; RV32IF-NEXT: lui a0, %hi(.LCPI28_1) -; RV32IF-NEXT: flw ft1, %lo(.LCPI28_1)(a0) +; RV32IF-NEXT: lui a0, 798720 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fmax.s ft0, fa0, ft0 +; RV32IF-NEXT: lui a0, 274400 +; RV32IF-NEXT: fmv.w.x ft1, a0 ; RV32IF-NEXT: fmin.s ft0, ft0, ft1 ; RV32IF-NEXT: fcvt.w.s a0, ft0, rtz ; RV32IF-NEXT: feq.s a1, fa0, fa0 @@ -1451,11 +1451,11 @@ ; ; RV64IF-LABEL: fcvt_w_s_sat_i8: ; RV64IF: # %bb.0: # %start -; RV64IF-NEXT: lui a0, %hi(.LCPI28_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI28_0)(a0) -; RV64IF-NEXT: lui a0, %hi(.LCPI28_1) -; RV64IF-NEXT: flw ft1, %lo(.LCPI28_1)(a0) +; RV64IF-NEXT: lui a0, 798720 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fmax.s ft0, fa0, ft0 +; RV64IF-NEXT: lui a0, 274400 +; RV64IF-NEXT: fmv.w.x ft1, a0 ; RV64IF-NEXT: fmin.s ft0, ft0, ft1 ; RV64IF-NEXT: fcvt.l.s a0, ft0, rtz ; RV64IF-NEXT: feq.s a1, fa0, fa0 @@ -1584,21 +1584,21 @@ define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind { ; RV32IF-LABEL: fcvt_wu_s_sat_i8: ; RV32IF: # %bb.0: # %start -; RV32IF-NEXT: lui a0, %hi(.LCPI30_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI30_0)(a0) -; RV32IF-NEXT: fmv.w.x ft1, zero -; RV32IF-NEXT: fmax.s ft1, fa0, ft1 -; RV32IF-NEXT: fmin.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.w.x ft0, zero +; RV32IF-NEXT: fmax.s ft0, fa0, ft0 +; RV32IF-NEXT: lui a0, 276464 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fmin.s ft0, ft0, ft1 ; RV32IF-NEXT: fcvt.wu.s a0, ft0, rtz ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcvt_wu_s_sat_i8: ; RV64IF: # %bb.0: # %start -; RV64IF-NEXT: 
lui a0, %hi(.LCPI30_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI30_0)(a0) -; RV64IF-NEXT: fmv.w.x ft1, zero -; RV64IF-NEXT: fmax.s ft1, fa0, ft1 -; RV64IF-NEXT: fmin.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.w.x ft0, zero +; RV64IF-NEXT: fmax.s ft0, fa0, ft0 +; RV64IF-NEXT: lui a0, 276464 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fmin.s ft0, ft0, ft1 ; RV64IF-NEXT: fcvt.lu.s a0, ft0, rtz ; RV64IF-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/float-imm.ll b/llvm/test/CodeGen/RISCV/float-imm.ll --- a/llvm/test/CodeGen/RISCV/float-imm.ll +++ b/llvm/test/CodeGen/RISCV/float-imm.ll @@ -17,8 +17,8 @@ define float @float_imm_op(float %a) nounwind { ; CHECK-LABEL: float_imm_op: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK-NEXT: flw ft0, %lo(.LCPI1_0)(a0) +; CHECK-NEXT: lui a0, 260096 +; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: fadd.s fa0, fa0, ft0 ; CHECK-NEXT: ret %1 = fadd float %a, 1.0 diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll @@ -640,8 +640,8 @@ define float @floor_f32(float %a) nounwind { ; RV32IF-LABEL: floor_f32: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI17_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI17_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB17_2 @@ -654,8 +654,8 @@ ; ; RV64IF-LABEL: floor_f32: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, %hi(.LCPI17_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI17_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB17_2 @@ -692,8 +692,8 @@ define float @ceil_f32(float %a) nounwind { ; RV32IF-LABEL: ceil_f32: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI18_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI18_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB18_2 @@ -706,8 +706,8 @@ ; ; RV64IF-LABEL: ceil_f32: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, %hi(.LCPI18_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI18_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB18_2 @@ -744,8 +744,8 @@ define float @trunc_f32(float %a) nounwind { ; RV32IF-LABEL: trunc_f32: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI19_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI19_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB19_2 @@ -758,8 +758,8 @@ ; ; RV64IF-LABEL: trunc_f32: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, %hi(.LCPI19_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI19_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB19_2 @@ -796,8 +796,8 @@ define float @rint_f32(float %a) nounwind { ; RV32IF-LABEL: rint_f32: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI20_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI20_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB20_2 @@ -810,8 +810,8 @@ ; ; RV64IF-LABEL: rint_f32: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, 
%hi(.LCPI20_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI20_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB20_2 @@ -880,8 +880,8 @@ define float @round_f32(float %a) nounwind { ; RV32IF-LABEL: round_f32: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI22_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI22_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB22_2 @@ -894,8 +894,8 @@ ; ; RV64IF-LABEL: round_f32: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, %hi(.LCPI22_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI22_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB22_2 @@ -932,8 +932,8 @@ define float @roundeven_f32(float %a) nounwind { ; RV32IF-LABEL: roundeven_f32: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI23_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI23_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB23_2 @@ -946,8 +946,8 @@ ; ; RV64IF-LABEL: roundeven_f32: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, %hi(.LCPI23_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI23_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB23_2 diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll --- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll @@ -25,9 +25,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI1_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI1_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB1_2 @@ -36,8 +36,8 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rdn ; RV32IF-NEXT: fsgnj.s fs0, ft0, fs0 ; RV32IF-NEXT: .LBB1_2: -; RV32IF-NEXT: lui a0, %hi(.LCPI1_1) -; RV32IF-NEXT: flw ft0, %lo(.LCPI1_1)(a0) +; RV32IF-NEXT: lui a0, 913408 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fle.s s0, ft0, fs0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixsfdi@plt @@ -46,8 +46,8 @@ ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: lui a1, 524288 ; RV32IF-NEXT: .LBB1_4: -; RV32IF-NEXT: lui a2, %hi(.LCPI1_2) -; RV32IF-NEXT: flw ft0, %lo(.LCPI1_2)(a2) +; RV32IF-NEXT: lui a2, %hi(.LCPI1_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI1_0)(a2) ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: beqz a2, .LBB1_6 ; RV32IF-NEXT: # %bb.5: @@ -102,9 +102,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI3_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI3_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB3_2 @@ -118,8 +118,8 @@ ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi@plt -; RV32IF-NEXT: lui a2, %hi(.LCPI3_1) -; RV32IF-NEXT: flw ft0, 
%lo(.LCPI3_1)(a2) +; RV32IF-NEXT: lui a2, %hi(.LCPI3_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI3_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: neg a2, a2 @@ -166,9 +166,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI5_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI5_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB5_2 @@ -177,8 +177,8 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rup ; RV32IF-NEXT: fsgnj.s fs0, ft0, fs0 ; RV32IF-NEXT: .LBB5_2: -; RV32IF-NEXT: lui a0, %hi(.LCPI5_1) -; RV32IF-NEXT: flw ft0, %lo(.LCPI5_1)(a0) +; RV32IF-NEXT: lui a0, 913408 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fle.s s0, ft0, fs0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixsfdi@plt @@ -187,8 +187,8 @@ ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: lui a1, 524288 ; RV32IF-NEXT: .LBB5_4: -; RV32IF-NEXT: lui a2, %hi(.LCPI5_2) -; RV32IF-NEXT: flw ft0, %lo(.LCPI5_2)(a2) +; RV32IF-NEXT: lui a2, %hi(.LCPI5_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI5_0)(a2) ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: beqz a2, .LBB5_6 ; RV32IF-NEXT: # %bb.5: @@ -243,9 +243,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI7_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI7_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB7_2 @@ -259,8 +259,8 @@ ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi@plt -; RV32IF-NEXT: lui a2, %hi(.LCPI7_1) -; RV32IF-NEXT: flw ft0, %lo(.LCPI7_1)(a2) +; RV32IF-NEXT: lui a2, %hi(.LCPI7_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI7_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: neg a2, a2 @@ -307,9 +307,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI9_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI9_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB9_2 @@ -318,8 +318,8 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rtz ; RV32IF-NEXT: fsgnj.s fs0, ft0, fs0 ; RV32IF-NEXT: .LBB9_2: -; RV32IF-NEXT: lui a0, %hi(.LCPI9_1) -; RV32IF-NEXT: flw ft0, %lo(.LCPI9_1)(a0) +; RV32IF-NEXT: lui a0, 913408 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fle.s s0, ft0, fs0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixsfdi@plt @@ -328,8 +328,8 @@ ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: lui a1, 524288 ; RV32IF-NEXT: .LBB9_4: -; RV32IF-NEXT: lui a2, %hi(.LCPI9_2) -; RV32IF-NEXT: flw ft0, %lo(.LCPI9_2)(a2) +; RV32IF-NEXT: lui a2, %hi(.LCPI9_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI9_0)(a2) ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: beqz a2, .LBB9_6 ; RV32IF-NEXT: # %bb.5: @@ -384,9 +384,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI11_0) -; RV32IF-NEXT: flw ft0, 
%lo(.LCPI11_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB11_2 @@ -400,8 +400,8 @@ ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi@plt -; RV32IF-NEXT: lui a2, %hi(.LCPI11_1) -; RV32IF-NEXT: flw ft0, %lo(.LCPI11_1)(a2) +; RV32IF-NEXT: lui a2, %hi(.LCPI11_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI11_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: neg a2, a2 @@ -448,9 +448,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI13_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI13_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB13_2 @@ -459,8 +459,8 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rmm ; RV32IF-NEXT: fsgnj.s fs0, ft0, fs0 ; RV32IF-NEXT: .LBB13_2: -; RV32IF-NEXT: lui a0, %hi(.LCPI13_1) -; RV32IF-NEXT: flw ft0, %lo(.LCPI13_1)(a0) +; RV32IF-NEXT: lui a0, 913408 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fle.s s0, ft0, fs0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixsfdi@plt @@ -469,8 +469,8 @@ ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: lui a1, 524288 ; RV32IF-NEXT: .LBB13_4: -; RV32IF-NEXT: lui a2, %hi(.LCPI13_2) -; RV32IF-NEXT: flw ft0, %lo(.LCPI13_2)(a2) +; RV32IF-NEXT: lui a2, %hi(.LCPI13_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI13_0)(a2) ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: beqz a2, .LBB13_6 ; RV32IF-NEXT: # %bb.5: @@ -525,9 +525,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI15_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI15_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB15_2 @@ -541,8 +541,8 @@ ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi@plt -; RV32IF-NEXT: lui a2, %hi(.LCPI15_1) -; RV32IF-NEXT: flw ft0, %lo(.LCPI15_1)(a2) +; RV32IF-NEXT: lui a2, %hi(.LCPI15_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI15_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: neg a2, a2 @@ -589,9 +589,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI17_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI17_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB17_2 @@ -600,8 +600,8 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rne ; RV32IF-NEXT: fsgnj.s fs0, ft0, fs0 ; RV32IF-NEXT: .LBB17_2: -; RV32IF-NEXT: lui a0, %hi(.LCPI17_1) -; RV32IF-NEXT: flw ft0, %lo(.LCPI17_1)(a0) +; RV32IF-NEXT: lui a0, 913408 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fle.s s0, ft0, fs0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixsfdi@plt @@ -610,8 +610,8 @@ ; RV32IF-NEXT: # %bb.3: ; RV32IF-NEXT: lui a1, 524288 ; RV32IF-NEXT: .LBB17_4: -; RV32IF-NEXT: lui a2, %hi(.LCPI17_2) -; RV32IF-NEXT: flw ft0, %lo(.LCPI17_2)(a2) 
+; RV32IF-NEXT: lui a2, %hi(.LCPI17_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI17_0)(a2) ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: beqz a2, .LBB17_6 ; RV32IF-NEXT: # %bb.5: @@ -666,9 +666,9 @@ ; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IF-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IF-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IF-NEXT: lui a0, %hi(.LCPI19_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI19_0)(a0) ; RV32IF-NEXT: fmv.s fs0, fa0 +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB19_2 @@ -682,8 +682,8 @@ ; RV32IF-NEXT: neg s0, a0 ; RV32IF-NEXT: fmv.s fa0, fs0 ; RV32IF-NEXT: call __fixunssfdi@plt -; RV32IF-NEXT: lui a2, %hi(.LCPI19_1) -; RV32IF-NEXT: flw ft0, %lo(.LCPI19_1)(a2) +; RV32IF-NEXT: lui a2, %hi(.LCPI19_0) +; RV32IF-NEXT: flw ft0, %lo(.LCPI19_0)(a2) ; RV32IF-NEXT: and a0, s0, a0 ; RV32IF-NEXT: flt.s a2, ft0, fs0 ; RV32IF-NEXT: neg a2, a2 diff --git a/llvm/test/CodeGen/RISCV/float-round-conv.ll b/llvm/test/CodeGen/RISCV/float-round-conv.ll --- a/llvm/test/CodeGen/RISCV/float-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/float-round-conv.ll @@ -52,12 +52,8 @@ define i64 @test_floor_si64(float %x) { ; RV32IF-LABEL: test_floor_si64: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: .cfi_def_cfa_offset 16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: .cfi_offset ra, -4 -; RV32IF-NEXT: lui a0, %hi(.LCPI3_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI3_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB3_2 @@ -66,6 +62,10 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rdn ; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0 ; RV32IF-NEXT: .LBB3_2: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: .cfi_def_cfa_offset 16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixsfdi@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -128,12 +128,8 @@ define i64 @test_floor_ui64(float %x) { ; RV32IF-LABEL: test_floor_ui64: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: .cfi_def_cfa_offset 16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: .cfi_offset ra, -4 -; RV32IF-NEXT: lui a0, %hi(.LCPI7_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI7_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB7_2 @@ -142,6 +138,10 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rdn ; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0 ; RV32IF-NEXT: .LBB7_2: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: .cfi_def_cfa_offset 16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixunssfdi@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -204,12 +204,8 @@ define i64 @test_ceil_si64(float %x) { ; RV32IF-LABEL: test_ceil_si64: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: .cfi_def_cfa_offset 16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: .cfi_offset ra, -4 -; RV32IF-NEXT: lui a0, %hi(.LCPI11_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI11_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB11_2 @@ -218,6 +214,10 @@ ; 
RV32IF-NEXT: fcvt.s.w ft0, a0, rup ; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0 ; RV32IF-NEXT: .LBB11_2: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: .cfi_def_cfa_offset 16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixsfdi@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -280,12 +280,8 @@ define i64 @test_ceil_ui64(float %x) { ; RV32IF-LABEL: test_ceil_ui64: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: .cfi_def_cfa_offset 16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: .cfi_offset ra, -4 -; RV32IF-NEXT: lui a0, %hi(.LCPI15_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI15_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB15_2 @@ -294,6 +290,10 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rup ; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0 ; RV32IF-NEXT: .LBB15_2: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: .cfi_def_cfa_offset 16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixunssfdi@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -356,12 +356,8 @@ define i64 @test_trunc_si64(float %x) { ; RV32IF-LABEL: test_trunc_si64: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: .cfi_def_cfa_offset 16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: .cfi_offset ra, -4 -; RV32IF-NEXT: lui a0, %hi(.LCPI19_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI19_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB19_2 @@ -370,6 +366,10 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rtz ; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0 ; RV32IF-NEXT: .LBB19_2: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: .cfi_def_cfa_offset 16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixsfdi@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -432,12 +432,8 @@ define i64 @test_trunc_ui64(float %x) { ; RV32IF-LABEL: test_trunc_ui64: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: .cfi_def_cfa_offset 16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: .cfi_offset ra, -4 -; RV32IF-NEXT: lui a0, %hi(.LCPI23_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI23_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB23_2 @@ -446,6 +442,10 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rtz ; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0 ; RV32IF-NEXT: .LBB23_2: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: .cfi_def_cfa_offset 16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixunssfdi@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -508,12 +508,8 @@ define i64 @test_round_si64(float %x) { ; RV32IF-LABEL: test_round_si64: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: .cfi_def_cfa_offset 16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: .cfi_offset ra, -4 -; RV32IF-NEXT: lui a0, %hi(.LCPI27_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI27_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s 
ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB27_2 @@ -522,6 +518,10 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rmm ; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0 ; RV32IF-NEXT: .LBB27_2: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: .cfi_def_cfa_offset 16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixsfdi@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -584,12 +584,8 @@ define i64 @test_round_ui64(float %x) { ; RV32IF-LABEL: test_round_ui64: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: .cfi_def_cfa_offset 16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: .cfi_offset ra, -4 -; RV32IF-NEXT: lui a0, %hi(.LCPI31_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI31_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB31_2 @@ -598,6 +594,10 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rmm ; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0 ; RV32IF-NEXT: .LBB31_2: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: .cfi_def_cfa_offset 16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixunssfdi@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -660,12 +660,8 @@ define i64 @test_roundeven_si64(float %x) { ; RV32IF-LABEL: test_roundeven_si64: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: .cfi_def_cfa_offset 16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: .cfi_offset ra, -4 -; RV32IF-NEXT: lui a0, %hi(.LCPI35_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI35_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB35_2 @@ -674,6 +670,10 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rne ; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0 ; RV32IF-NEXT: .LBB35_2: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: .cfi_def_cfa_offset 16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixsfdi@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -736,12 +736,8 @@ define i64 @test_roundeven_ui64(float %x) { ; RV32IF-LABEL: test_roundeven_ui64: ; RV32IF: # %bb.0: -; RV32IF-NEXT: addi sp, sp, -16 -; RV32IF-NEXT: .cfi_def_cfa_offset 16 -; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IF-NEXT: .cfi_offset ra, -4 -; RV32IF-NEXT: lui a0, %hi(.LCPI39_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI39_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB39_2 @@ -750,6 +746,10 @@ ; RV32IF-NEXT: fcvt.s.w ft0, a0, rne ; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0 ; RV32IF-NEXT: .LBB39_2: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: .cfi_def_cfa_offset 16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: .cfi_offset ra, -4 ; RV32IF-NEXT: call __fixunssfdi@plt ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -788,8 +788,8 @@ ; RV64IFD-NEXT: ret ; RV32IF-LABEL: test_floor_float: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI40_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI40_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz 
a0, .LBB40_2 @@ -802,8 +802,8 @@ ; ; RV64IF-LABEL: test_floor_float: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, %hi(.LCPI40_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI40_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB40_2 @@ -841,8 +841,8 @@ ; RV64IFD-NEXT: ret ; RV32IF-LABEL: test_ceil_float: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI41_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI41_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB41_2 @@ -855,8 +855,8 @@ ; ; RV64IF-LABEL: test_ceil_float: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, %hi(.LCPI41_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI41_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB41_2 @@ -894,8 +894,8 @@ ; RV64IFD-NEXT: ret ; RV32IF-LABEL: test_trunc_float: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI42_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI42_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB42_2 @@ -908,8 +908,8 @@ ; ; RV64IF-LABEL: test_trunc_float: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, %hi(.LCPI42_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI42_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB42_2 @@ -947,8 +947,8 @@ ; RV64IFD-NEXT: ret ; RV32IF-LABEL: test_round_float: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI43_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI43_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB43_2 @@ -961,8 +961,8 @@ ; ; RV64IF-LABEL: test_round_float: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, %hi(.LCPI43_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI43_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB43_2 @@ -1000,8 +1000,8 @@ ; RV64IFD-NEXT: ret ; RV32IF-LABEL: test_roundeven_float: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a0, %hi(.LCPI44_0) -; RV32IF-NEXT: flw ft0, %lo(.LCPI44_0)(a0) +; RV32IF-NEXT: lui a0, 307200 +; RV32IF-NEXT: fmv.w.x ft0, a0 ; RV32IF-NEXT: fabs.s ft1, fa0 ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: beqz a0, .LBB44_2 @@ -1014,8 +1014,8 @@ ; ; RV64IF-LABEL: test_roundeven_float: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a0, %hi(.LCPI44_0) -; RV64IF-NEXT: flw ft0, %lo(.LCPI44_0)(a0) +; RV64IF-NEXT: lui a0, 307200 +; RV64IF-NEXT: fmv.w.x ft0, a0 ; RV64IF-NEXT: fabs.s ft1, fa0 ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: beqz a0, .LBB44_2 diff --git a/llvm/test/CodeGen/RISCV/float-select-verify.ll b/llvm/test/CodeGen/RISCV/float-select-verify.ll --- a/llvm/test/CodeGen/RISCV/float-select-verify.ll +++ b/llvm/test/CodeGen/RISCV/float-select-verify.ll @@ -13,10 +13,10 @@ ; CHECK-NEXT: [[FMV_W_X:%[0-9]+]]:fpr32 = FMV_W_X [[COPY]] ; CHECK-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY2]], 1 ; CHECK-NEXT: [[FMV_W_X1:%[0-9]+]]:fpr32 = FMV_W_X [[COPY1]] - ; CHECK-NEXT: [[LUI:%[0-9]+]]:gpr = LUI target-flags(riscv-hi) %const.0 - ; CHECK-NEXT: [[FLW:%[0-9]+]]:fpr32 = FLW killed [[LUI]], target-flags(riscv-lo) %const.0 :: (load 
(s32) from constant-pool) + ; CHECK-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 307200 + ; CHECK-NEXT: [[FMV_W_X2:%[0-9]+]]:fpr32 = FMV_W_X killed [[LUI]] ; CHECK-NEXT: [[FSGNJX_S:%[0-9]+]]:fpr32 = FSGNJX_S [[FMV_W_X1]], [[FMV_W_X1]] - ; CHECK-NEXT: [[FLT_S:%[0-9]+]]:gpr = nofpexcept FLT_S [[FSGNJX_S]], [[FLW]] + ; CHECK-NEXT: [[FLT_S:%[0-9]+]]:gpr = nofpexcept FLT_S [[FSGNJX_S]], [[FMV_W_X2]] ; CHECK-NEXT: BEQ [[FLT_S]], $x0, %bb.2 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.1.entry: @@ -41,7 +41,7 @@ ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: [[PHI1:%[0-9]+]]:fpr32 = PHI [[PHI]], %bb.2, [[FMV_W_X1]], %bb.3 ; CHECK-NEXT: [[FSGNJX_S1:%[0-9]+]]:fpr32 = FSGNJX_S [[FMV_W_X]], [[FMV_W_X]] - ; CHECK-NEXT: [[FLT_S1:%[0-9]+]]:gpr = nofpexcept FLT_S [[FSGNJX_S1]], [[FLW]] + ; CHECK-NEXT: [[FLT_S1:%[0-9]+]]:gpr = nofpexcept FLT_S [[FSGNJX_S1]], [[FMV_W_X2]] ; CHECK-NEXT: BEQ [[FLT_S1]], $x0, %bb.6 ; CHECK-NEXT: {{ $}} ; CHECK-NEXT: bb.5.entry: diff --git a/llvm/test/CodeGen/RISCV/fp-imm.ll b/llvm/test/CodeGen/RISCV/fp-imm.ll --- a/llvm/test/CodeGen/RISCV/fp-imm.ll +++ b/llvm/test/CodeGen/RISCV/fp-imm.ll @@ -34,26 +34,26 @@ define float @f32_negative_zero(ptr %pf) nounwind { ; RV32F-LABEL: f32_negative_zero: ; RV32F: # %bb.0: -; RV32F-NEXT: fmv.w.x ft0, zero -; RV32F-NEXT: fneg.s fa0, ft0 +; RV32F-NEXT: lui a0, 524288 +; RV32F-NEXT: fmv.w.x fa0, a0 ; RV32F-NEXT: ret ; ; RV32D-LABEL: f32_negative_zero: ; RV32D: # %bb.0: -; RV32D-NEXT: fmv.w.x ft0, zero -; RV32D-NEXT: fneg.s fa0, ft0 +; RV32D-NEXT: lui a0, 524288 +; RV32D-NEXT: fmv.w.x fa0, a0 ; RV32D-NEXT: ret ; ; RV64F-LABEL: f32_negative_zero: ; RV64F: # %bb.0: -; RV64F-NEXT: fmv.w.x ft0, zero -; RV64F-NEXT: fneg.s fa0, ft0 +; RV64F-NEXT: lui a0, 524288 +; RV64F-NEXT: fmv.w.x fa0, a0 ; RV64F-NEXT: ret ; ; RV64D-LABEL: f32_negative_zero: ; RV64D: # %bb.0: -; RV64D-NEXT: fmv.w.x ft0, zero -; RV64D-NEXT: fneg.s fa0, ft0 +; RV64D-NEXT: lui a0, 524288 +; RV64D-NEXT: fmv.w.x fa0, a0 ; RV64D-NEXT: ret ret float -0.0 } diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -83,15 +83,15 @@ define i16 @fcvt_si_h_sat(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_si_h_sat: ; RV32IZFH: # %bb.0: # %start +; RV32IZFH-NEXT: fcvt.s.h ft0, fa0 ; RV32IZFH-NEXT: lui a0, %hi(.LCPI1_0) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI1_0)(a0) -; RV32IZFH-NEXT: lui a0, %hi(.LCPI1_1) -; RV32IZFH-NEXT: flw ft1, %lo(.LCPI1_1)(a0) -; RV32IZFH-NEXT: fcvt.s.h ft2, fa0 -; RV32IZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV32IZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV32IZFH-NEXT: fcvt.w.s a0, ft0, rtz -; RV32IZFH-NEXT: feq.s a1, ft2, ft2 +; RV32IZFH-NEXT: flw ft1, %lo(.LCPI1_0)(a0) +; RV32IZFH-NEXT: lui a0, 815104 +; RV32IZFH-NEXT: fmv.w.x ft2, a0 +; RV32IZFH-NEXT: fmax.s ft2, ft0, ft2 +; RV32IZFH-NEXT: fmin.s ft1, ft2, ft1 +; RV32IZFH-NEXT: fcvt.w.s a0, ft1, rtz +; RV32IZFH-NEXT: feq.s a1, ft0, ft0 ; RV32IZFH-NEXT: seqz a1, a1 ; RV32IZFH-NEXT: addi a1, a1, -1 ; RV32IZFH-NEXT: and a0, a1, a0 @@ -99,15 +99,15 @@ ; ; RV64IZFH-LABEL: fcvt_si_h_sat: ; RV64IZFH: # %bb.0: # %start +; RV64IZFH-NEXT: fcvt.s.h ft0, fa0 ; RV64IZFH-NEXT: lui a0, %hi(.LCPI1_0) -; RV64IZFH-NEXT: flw ft0, %lo(.LCPI1_0)(a0) -; RV64IZFH-NEXT: lui a0, %hi(.LCPI1_1) -; RV64IZFH-NEXT: flw ft1, %lo(.LCPI1_1)(a0) -; RV64IZFH-NEXT: fcvt.s.h ft2, fa0 -; RV64IZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV64IZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV64IZFH-NEXT: fcvt.l.s a0, ft0, rtz -; RV64IZFH-NEXT: feq.s a1, ft2, ft2 +; RV64IZFH-NEXT: flw ft1, 
%lo(.LCPI1_0)(a0) +; RV64IZFH-NEXT: lui a0, 815104 +; RV64IZFH-NEXT: fmv.w.x ft2, a0 +; RV64IZFH-NEXT: fmax.s ft2, ft0, ft2 +; RV64IZFH-NEXT: fmin.s ft1, ft2, ft1 +; RV64IZFH-NEXT: fcvt.l.s a0, ft1, rtz +; RV64IZFH-NEXT: feq.s a1, ft0, ft0 ; RV64IZFH-NEXT: seqz a1, a1 ; RV64IZFH-NEXT: addi a1, a1, -1 ; RV64IZFH-NEXT: and a0, a1, a0 @@ -115,15 +115,15 @@ ; ; RV32IDZFH-LABEL: fcvt_si_h_sat: ; RV32IDZFH: # %bb.0: # %start +; RV32IDZFH-NEXT: fcvt.s.h ft0, fa0 ; RV32IDZFH-NEXT: lui a0, %hi(.LCPI1_0) -; RV32IDZFH-NEXT: flw ft0, %lo(.LCPI1_0)(a0) -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI1_1) -; RV32IDZFH-NEXT: flw ft1, %lo(.LCPI1_1)(a0) -; RV32IDZFH-NEXT: fcvt.s.h ft2, fa0 -; RV32IDZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV32IDZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV32IDZFH-NEXT: fcvt.w.s a0, ft0, rtz -; RV32IDZFH-NEXT: feq.s a1, ft2, ft2 +; RV32IDZFH-NEXT: flw ft1, %lo(.LCPI1_0)(a0) +; RV32IDZFH-NEXT: lui a0, 815104 +; RV32IDZFH-NEXT: fmv.w.x ft2, a0 +; RV32IDZFH-NEXT: fmax.s ft2, ft0, ft2 +; RV32IDZFH-NEXT: fmin.s ft1, ft2, ft1 +; RV32IDZFH-NEXT: fcvt.w.s a0, ft1, rtz +; RV32IDZFH-NEXT: feq.s a1, ft0, ft0 ; RV32IDZFH-NEXT: seqz a1, a1 ; RV32IDZFH-NEXT: addi a1, a1, -1 ; RV32IDZFH-NEXT: and a0, a1, a0 @@ -131,15 +131,15 @@ ; ; RV64IDZFH-LABEL: fcvt_si_h_sat: ; RV64IDZFH: # %bb.0: # %start +; RV64IDZFH-NEXT: fcvt.s.h ft0, fa0 ; RV64IDZFH-NEXT: lui a0, %hi(.LCPI1_0) -; RV64IDZFH-NEXT: flw ft0, %lo(.LCPI1_0)(a0) -; RV64IDZFH-NEXT: lui a0, %hi(.LCPI1_1) -; RV64IDZFH-NEXT: flw ft1, %lo(.LCPI1_1)(a0) -; RV64IDZFH-NEXT: fcvt.s.h ft2, fa0 -; RV64IDZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV64IDZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV64IDZFH-NEXT: fcvt.l.s a0, ft0, rtz -; RV64IDZFH-NEXT: feq.s a1, ft2, ft2 +; RV64IDZFH-NEXT: flw ft1, %lo(.LCPI1_0)(a0) +; RV64IDZFH-NEXT: lui a0, 815104 +; RV64IDZFH-NEXT: fmv.w.x ft2, a0 +; RV64IDZFH-NEXT: fmax.s ft2, ft0, ft2 +; RV64IDZFH-NEXT: fmin.s ft1, ft2, ft1 +; RV64IDZFH-NEXT: fcvt.l.s a0, ft1, rtz +; RV64IDZFH-NEXT: feq.s a1, ft0, ft0 ; RV64IDZFH-NEXT: seqz a1, a1 ; RV64IDZFH-NEXT: addi a1, a1, -1 ; RV64IDZFH-NEXT: and a0, a1, a0 @@ -233,15 +233,15 @@ ; ; CHECK32-IZFHMIN-LABEL: fcvt_si_h_sat: ; CHECK32-IZFHMIN: # %bb.0: # %start +; CHECK32-IZFHMIN-NEXT: fcvt.s.h ft0, fa0 ; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK32-IZFHMIN-NEXT: flw ft0, %lo(.LCPI1_0)(a0) -; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI1_1) -; CHECK32-IZFHMIN-NEXT: flw ft1, %lo(.LCPI1_1)(a0) -; CHECK32-IZFHMIN-NEXT: fcvt.s.h ft2, fa0 -; CHECK32-IZFHMIN-NEXT: fmax.s ft0, ft2, ft0 -; CHECK32-IZFHMIN-NEXT: fmin.s ft0, ft0, ft1 -; CHECK32-IZFHMIN-NEXT: fcvt.w.s a0, ft0, rtz -; CHECK32-IZFHMIN-NEXT: feq.s a1, ft2, ft2 +; CHECK32-IZFHMIN-NEXT: flw ft1, %lo(.LCPI1_0)(a0) +; CHECK32-IZFHMIN-NEXT: lui a0, 815104 +; CHECK32-IZFHMIN-NEXT: fmv.w.x ft2, a0 +; CHECK32-IZFHMIN-NEXT: fmax.s ft2, ft0, ft2 +; CHECK32-IZFHMIN-NEXT: fmin.s ft1, ft2, ft1 +; CHECK32-IZFHMIN-NEXT: fcvt.w.s a0, ft1, rtz +; CHECK32-IZFHMIN-NEXT: feq.s a1, ft0, ft0 ; CHECK32-IZFHMIN-NEXT: seqz a1, a1 ; CHECK32-IZFHMIN-NEXT: addi a1, a1, -1 ; CHECK32-IZFHMIN-NEXT: and a0, a1, a0 @@ -249,15 +249,15 @@ ; ; CHECK64-IZFHMIN-LABEL: fcvt_si_h_sat: ; CHECK64-IZFHMIN: # %bb.0: # %start +; CHECK64-IZFHMIN-NEXT: fcvt.s.h ft0, fa0 ; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI1_0) -; CHECK64-IZFHMIN-NEXT: flw ft0, %lo(.LCPI1_0)(a0) -; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI1_1) -; CHECK64-IZFHMIN-NEXT: flw ft1, %lo(.LCPI1_1)(a0) -; CHECK64-IZFHMIN-NEXT: fcvt.s.h ft2, fa0 -; CHECK64-IZFHMIN-NEXT: fmax.s ft0, ft2, ft0 -; CHECK64-IZFHMIN-NEXT: fmin.s ft0, ft0, ft1 -; 
CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, ft0, rtz -; CHECK64-IZFHMIN-NEXT: feq.s a1, ft2, ft2 +; CHECK64-IZFHMIN-NEXT: flw ft1, %lo(.LCPI1_0)(a0) +; CHECK64-IZFHMIN-NEXT: lui a0, 815104 +; CHECK64-IZFHMIN-NEXT: fmv.w.x ft2, a0 +; CHECK64-IZFHMIN-NEXT: fmax.s ft2, ft0, ft2 +; CHECK64-IZFHMIN-NEXT: fmin.s ft1, ft2, ft1 +; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, ft1, rtz +; CHECK64-IZFHMIN-NEXT: feq.s a1, ft0, ft0 ; CHECK64-IZFHMIN-NEXT: seqz a1, a1 ; CHECK64-IZFHMIN-NEXT: addi a1, a1, -1 ; CHECK64-IZFHMIN-NEXT: and a0, a1, a0 @@ -1032,9 +1032,9 @@ ; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFH-NEXT: lui a0, %hi(.LCPI10_0) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI10_0)(a0) ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IZFH-NEXT: lui a0, 913408 +; RV32IZFH-NEXT: fmv.w.x ft0, a0 ; RV32IZFH-NEXT: fle.s s0, ft0, fs0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixsfdi@plt @@ -1043,8 +1043,8 @@ ; RV32IZFH-NEXT: # %bb.1: # %start ; RV32IZFH-NEXT: lui a1, 524288 ; RV32IZFH-NEXT: .LBB10_2: # %start -; RV32IZFH-NEXT: lui a2, %hi(.LCPI10_1) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI10_1)(a2) +; RV32IZFH-NEXT: lui a2, %hi(.LCPI10_0) +; RV32IZFH-NEXT: flw ft0, %lo(.LCPI10_0)(a2) ; RV32IZFH-NEXT: flt.s a2, ft0, fs0 ; RV32IZFH-NEXT: beqz a2, .LBB10_4 ; RV32IZFH-NEXT: # %bb.3: @@ -1080,9 +1080,9 @@ ; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IDZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IDZFH-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI10_0) -; RV32IDZFH-NEXT: flw ft0, %lo(.LCPI10_0)(a0) ; RV32IDZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IDZFH-NEXT: lui a0, 913408 +; RV32IDZFH-NEXT: fmv.w.x ft0, a0 ; RV32IDZFH-NEXT: fle.s s0, ft0, fs0 ; RV32IDZFH-NEXT: fmv.s fa0, fs0 ; RV32IDZFH-NEXT: call __fixsfdi@plt @@ -1091,8 +1091,8 @@ ; RV32IDZFH-NEXT: # %bb.1: # %start ; RV32IDZFH-NEXT: lui a1, 524288 ; RV32IDZFH-NEXT: .LBB10_2: # %start -; RV32IDZFH-NEXT: lui a2, %hi(.LCPI10_1) -; RV32IDZFH-NEXT: flw ft0, %lo(.LCPI10_1)(a2) +; RV32IDZFH-NEXT: lui a2, %hi(.LCPI10_0) +; RV32IDZFH-NEXT: flw ft0, %lo(.LCPI10_0)(a2) ; RV32IDZFH-NEXT: flt.s a2, ft0, fs0 ; RV32IDZFH-NEXT: beqz a2, .LBB10_4 ; RV32IDZFH-NEXT: # %bb.3: @@ -1241,9 +1241,9 @@ ; RV32IFZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IFZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IFZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IFZFHMIN-NEXT: lui a0, %hi(.LCPI10_0) -; RV32IFZFHMIN-NEXT: flw ft0, %lo(.LCPI10_0)(a0) ; RV32IFZFHMIN-NEXT: fcvt.s.h fs0, fa0 +; RV32IFZFHMIN-NEXT: lui a0, 913408 +; RV32IFZFHMIN-NEXT: fmv.w.x ft0, a0 ; RV32IFZFHMIN-NEXT: fle.s s0, ft0, fs0 ; RV32IFZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IFZFHMIN-NEXT: call __fixsfdi@plt @@ -1252,8 +1252,8 @@ ; RV32IFZFHMIN-NEXT: # %bb.1: # %start ; RV32IFZFHMIN-NEXT: lui a1, 524288 ; RV32IFZFHMIN-NEXT: .LBB10_2: # %start -; RV32IFZFHMIN-NEXT: lui a2, %hi(.LCPI10_1) -; RV32IFZFHMIN-NEXT: flw ft0, %lo(.LCPI10_1)(a2) +; RV32IFZFHMIN-NEXT: lui a2, %hi(.LCPI10_0) +; RV32IFZFHMIN-NEXT: flw ft0, %lo(.LCPI10_0)(a2) ; RV32IFZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IFZFHMIN-NEXT: beqz a2, .LBB10_4 ; RV32IFZFHMIN-NEXT: # %bb.3: @@ -1290,9 +1290,9 @@ ; RV32IDZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IDZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32IDZFHMIN-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill -; RV32IDZFHMIN-NEXT: lui a0, %hi(.LCPI10_0) -; RV32IDZFHMIN-NEXT: flw ft0, %lo(.LCPI10_0)(a0) ; RV32IDZFHMIN-NEXT: fcvt.s.h 
fs0, fa0 +; RV32IDZFHMIN-NEXT: lui a0, 913408 +; RV32IDZFHMIN-NEXT: fmv.w.x ft0, a0 ; RV32IDZFHMIN-NEXT: fle.s s0, ft0, fs0 ; RV32IDZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IDZFHMIN-NEXT: call __fixsfdi@plt @@ -1301,8 +1301,8 @@ ; RV32IDZFHMIN-NEXT: # %bb.1: # %start ; RV32IDZFHMIN-NEXT: lui a1, 524288 ; RV32IDZFHMIN-NEXT: .LBB10_2: # %start -; RV32IDZFHMIN-NEXT: lui a2, %hi(.LCPI10_1) -; RV32IDZFHMIN-NEXT: flw ft0, %lo(.LCPI10_1)(a2) +; RV32IDZFHMIN-NEXT: lui a2, %hi(.LCPI10_0) +; RV32IDZFHMIN-NEXT: flw ft0, %lo(.LCPI10_0)(a2) ; RV32IDZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IDZFHMIN-NEXT: beqz a2, .LBB10_4 ; RV32IDZFHMIN-NEXT: # %bb.3: @@ -2785,15 +2785,15 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_w_s_sat_i16: ; RV32IZFH: # %bb.0: # %start +; RV32IZFH-NEXT: fcvt.s.h ft0, fa0 ; RV32IZFH-NEXT: lui a0, %hi(.LCPI32_0) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI32_0)(a0) -; RV32IZFH-NEXT: lui a0, %hi(.LCPI32_1) -; RV32IZFH-NEXT: flw ft1, %lo(.LCPI32_1)(a0) -; RV32IZFH-NEXT: fcvt.s.h ft2, fa0 -; RV32IZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV32IZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV32IZFH-NEXT: fcvt.w.s a0, ft0, rtz -; RV32IZFH-NEXT: feq.s a1, ft2, ft2 +; RV32IZFH-NEXT: flw ft1, %lo(.LCPI32_0)(a0) +; RV32IZFH-NEXT: lui a0, 815104 +; RV32IZFH-NEXT: fmv.w.x ft2, a0 +; RV32IZFH-NEXT: fmax.s ft2, ft0, ft2 +; RV32IZFH-NEXT: fmin.s ft1, ft2, ft1 +; RV32IZFH-NEXT: fcvt.w.s a0, ft1, rtz +; RV32IZFH-NEXT: feq.s a1, ft0, ft0 ; RV32IZFH-NEXT: seqz a1, a1 ; RV32IZFH-NEXT: addi a1, a1, -1 ; RV32IZFH-NEXT: and a0, a1, a0 @@ -2801,15 +2801,15 @@ ; ; RV64IZFH-LABEL: fcvt_w_s_sat_i16: ; RV64IZFH: # %bb.0: # %start +; RV64IZFH-NEXT: fcvt.s.h ft0, fa0 ; RV64IZFH-NEXT: lui a0, %hi(.LCPI32_0) -; RV64IZFH-NEXT: flw ft0, %lo(.LCPI32_0)(a0) -; RV64IZFH-NEXT: lui a0, %hi(.LCPI32_1) -; RV64IZFH-NEXT: flw ft1, %lo(.LCPI32_1)(a0) -; RV64IZFH-NEXT: fcvt.s.h ft2, fa0 -; RV64IZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV64IZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV64IZFH-NEXT: fcvt.l.s a0, ft0, rtz -; RV64IZFH-NEXT: feq.s a1, ft2, ft2 +; RV64IZFH-NEXT: flw ft1, %lo(.LCPI32_0)(a0) +; RV64IZFH-NEXT: lui a0, 815104 +; RV64IZFH-NEXT: fmv.w.x ft2, a0 +; RV64IZFH-NEXT: fmax.s ft2, ft0, ft2 +; RV64IZFH-NEXT: fmin.s ft1, ft2, ft1 +; RV64IZFH-NEXT: fcvt.l.s a0, ft1, rtz +; RV64IZFH-NEXT: feq.s a1, ft0, ft0 ; RV64IZFH-NEXT: seqz a1, a1 ; RV64IZFH-NEXT: addi a1, a1, -1 ; RV64IZFH-NEXT: and a0, a1, a0 @@ -2817,15 +2817,15 @@ ; ; RV32IDZFH-LABEL: fcvt_w_s_sat_i16: ; RV32IDZFH: # %bb.0: # %start +; RV32IDZFH-NEXT: fcvt.s.h ft0, fa0 ; RV32IDZFH-NEXT: lui a0, %hi(.LCPI32_0) -; RV32IDZFH-NEXT: flw ft0, %lo(.LCPI32_0)(a0) -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI32_1) -; RV32IDZFH-NEXT: flw ft1, %lo(.LCPI32_1)(a0) -; RV32IDZFH-NEXT: fcvt.s.h ft2, fa0 -; RV32IDZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV32IDZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV32IDZFH-NEXT: fcvt.w.s a0, ft0, rtz -; RV32IDZFH-NEXT: feq.s a1, ft2, ft2 +; RV32IDZFH-NEXT: flw ft1, %lo(.LCPI32_0)(a0) +; RV32IDZFH-NEXT: lui a0, 815104 +; RV32IDZFH-NEXT: fmv.w.x ft2, a0 +; RV32IDZFH-NEXT: fmax.s ft2, ft0, ft2 +; RV32IDZFH-NEXT: fmin.s ft1, ft2, ft1 +; RV32IDZFH-NEXT: fcvt.w.s a0, ft1, rtz +; RV32IDZFH-NEXT: feq.s a1, ft0, ft0 ; RV32IDZFH-NEXT: seqz a1, a1 ; RV32IDZFH-NEXT: addi a1, a1, -1 ; RV32IDZFH-NEXT: and a0, a1, a0 @@ -2833,15 +2833,15 @@ ; ; RV64IDZFH-LABEL: fcvt_w_s_sat_i16: ; RV64IDZFH: # %bb.0: # %start +; RV64IDZFH-NEXT: fcvt.s.h ft0, fa0 ; RV64IDZFH-NEXT: lui a0, %hi(.LCPI32_0) -; RV64IDZFH-NEXT: flw ft0, %lo(.LCPI32_0)(a0) -; RV64IDZFH-NEXT: lui a0, %hi(.LCPI32_1) -; RV64IDZFH-NEXT: 
flw ft1, %lo(.LCPI32_1)(a0) -; RV64IDZFH-NEXT: fcvt.s.h ft2, fa0 -; RV64IDZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV64IDZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV64IDZFH-NEXT: fcvt.l.s a0, ft0, rtz -; RV64IDZFH-NEXT: feq.s a1, ft2, ft2 +; RV64IDZFH-NEXT: flw ft1, %lo(.LCPI32_0)(a0) +; RV64IDZFH-NEXT: lui a0, 815104 +; RV64IDZFH-NEXT: fmv.w.x ft2, a0 +; RV64IDZFH-NEXT: fmax.s ft2, ft0, ft2 +; RV64IDZFH-NEXT: fmin.s ft1, ft2, ft1 +; RV64IDZFH-NEXT: fcvt.l.s a0, ft1, rtz +; RV64IDZFH-NEXT: feq.s a1, ft0, ft0 ; RV64IDZFH-NEXT: seqz a1, a1 ; RV64IDZFH-NEXT: addi a1, a1, -1 ; RV64IDZFH-NEXT: and a0, a1, a0 @@ -2939,15 +2939,15 @@ ; ; CHECK32-IZFHMIN-LABEL: fcvt_w_s_sat_i16: ; CHECK32-IZFHMIN: # %bb.0: # %start +; CHECK32-IZFHMIN-NEXT: fcvt.s.h ft0, fa0 ; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI32_0) -; CHECK32-IZFHMIN-NEXT: flw ft0, %lo(.LCPI32_0)(a0) -; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI32_1) -; CHECK32-IZFHMIN-NEXT: flw ft1, %lo(.LCPI32_1)(a0) -; CHECK32-IZFHMIN-NEXT: fcvt.s.h ft2, fa0 -; CHECK32-IZFHMIN-NEXT: fmax.s ft0, ft2, ft0 -; CHECK32-IZFHMIN-NEXT: fmin.s ft0, ft0, ft1 -; CHECK32-IZFHMIN-NEXT: fcvt.w.s a0, ft0, rtz -; CHECK32-IZFHMIN-NEXT: feq.s a1, ft2, ft2 +; CHECK32-IZFHMIN-NEXT: flw ft1, %lo(.LCPI32_0)(a0) +; CHECK32-IZFHMIN-NEXT: lui a0, 815104 +; CHECK32-IZFHMIN-NEXT: fmv.w.x ft2, a0 +; CHECK32-IZFHMIN-NEXT: fmax.s ft2, ft0, ft2 +; CHECK32-IZFHMIN-NEXT: fmin.s ft1, ft2, ft1 +; CHECK32-IZFHMIN-NEXT: fcvt.w.s a0, ft1, rtz +; CHECK32-IZFHMIN-NEXT: feq.s a1, ft0, ft0 ; CHECK32-IZFHMIN-NEXT: seqz a1, a1 ; CHECK32-IZFHMIN-NEXT: addi a1, a1, -1 ; CHECK32-IZFHMIN-NEXT: and a0, a1, a0 @@ -2955,15 +2955,15 @@ ; ; CHECK64-IZFHMIN-LABEL: fcvt_w_s_sat_i16: ; CHECK64-IZFHMIN: # %bb.0: # %start +; CHECK64-IZFHMIN-NEXT: fcvt.s.h ft0, fa0 ; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI32_0) -; CHECK64-IZFHMIN-NEXT: flw ft0, %lo(.LCPI32_0)(a0) -; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI32_1) -; CHECK64-IZFHMIN-NEXT: flw ft1, %lo(.LCPI32_1)(a0) -; CHECK64-IZFHMIN-NEXT: fcvt.s.h ft2, fa0 -; CHECK64-IZFHMIN-NEXT: fmax.s ft0, ft2, ft0 -; CHECK64-IZFHMIN-NEXT: fmin.s ft0, ft0, ft1 -; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, ft0, rtz -; CHECK64-IZFHMIN-NEXT: feq.s a1, ft2, ft2 +; CHECK64-IZFHMIN-NEXT: flw ft1, %lo(.LCPI32_0)(a0) +; CHECK64-IZFHMIN-NEXT: lui a0, 815104 +; CHECK64-IZFHMIN-NEXT: fmv.w.x ft2, a0 +; CHECK64-IZFHMIN-NEXT: fmax.s ft2, ft0, ft2 +; CHECK64-IZFHMIN-NEXT: fmin.s ft1, ft2, ft1 +; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, ft1, rtz +; CHECK64-IZFHMIN-NEXT: feq.s a1, ft0, ft0 ; CHECK64-IZFHMIN-NEXT: seqz a1, a1 ; CHECK64-IZFHMIN-NEXT: addi a1, a1, -1 ; CHECK64-IZFHMIN-NEXT: and a0, a1, a0 @@ -3249,15 +3249,15 @@ define signext i8 @fcvt_w_s_sat_i8(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_w_s_sat_i8: ; RV32IZFH: # %bb.0: # %start -; RV32IZFH-NEXT: lui a0, %hi(.LCPI36_0) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI36_0)(a0) -; RV32IZFH-NEXT: lui a0, %hi(.LCPI36_1) -; RV32IZFH-NEXT: flw ft1, %lo(.LCPI36_1)(a0) -; RV32IZFH-NEXT: fcvt.s.h ft2, fa0 -; RV32IZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV32IZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV32IZFH-NEXT: fcvt.w.s a0, ft0, rtz -; RV32IZFH-NEXT: feq.s a1, ft2, ft2 +; RV32IZFH-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFH-NEXT: lui a0, 798720 +; RV32IZFH-NEXT: fmv.w.x ft1, a0 +; RV32IZFH-NEXT: fmax.s ft1, ft0, ft1 +; RV32IZFH-NEXT: lui a0, 274400 +; RV32IZFH-NEXT: fmv.w.x ft2, a0 +; RV32IZFH-NEXT: fmin.s ft1, ft1, ft2 +; RV32IZFH-NEXT: fcvt.w.s a0, ft1, rtz +; RV32IZFH-NEXT: feq.s a1, ft0, ft0 ; RV32IZFH-NEXT: seqz a1, a1 ; RV32IZFH-NEXT: addi a1, a1, -1 ; RV32IZFH-NEXT: and a0, a1, a0 @@ -3265,15 
+3265,15 @@ ; ; RV64IZFH-LABEL: fcvt_w_s_sat_i8: ; RV64IZFH: # %bb.0: # %start -; RV64IZFH-NEXT: lui a0, %hi(.LCPI36_0) -; RV64IZFH-NEXT: flw ft0, %lo(.LCPI36_0)(a0) -; RV64IZFH-NEXT: lui a0, %hi(.LCPI36_1) -; RV64IZFH-NEXT: flw ft1, %lo(.LCPI36_1)(a0) -; RV64IZFH-NEXT: fcvt.s.h ft2, fa0 -; RV64IZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV64IZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV64IZFH-NEXT: fcvt.l.s a0, ft0, rtz -; RV64IZFH-NEXT: feq.s a1, ft2, ft2 +; RV64IZFH-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFH-NEXT: lui a0, 798720 +; RV64IZFH-NEXT: fmv.w.x ft1, a0 +; RV64IZFH-NEXT: fmax.s ft1, ft0, ft1 +; RV64IZFH-NEXT: lui a0, 274400 +; RV64IZFH-NEXT: fmv.w.x ft2, a0 +; RV64IZFH-NEXT: fmin.s ft1, ft1, ft2 +; RV64IZFH-NEXT: fcvt.l.s a0, ft1, rtz +; RV64IZFH-NEXT: feq.s a1, ft0, ft0 ; RV64IZFH-NEXT: seqz a1, a1 ; RV64IZFH-NEXT: addi a1, a1, -1 ; RV64IZFH-NEXT: and a0, a1, a0 @@ -3281,15 +3281,15 @@ ; ; RV32IDZFH-LABEL: fcvt_w_s_sat_i8: ; RV32IDZFH: # %bb.0: # %start -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI36_0) -; RV32IDZFH-NEXT: flw ft0, %lo(.LCPI36_0)(a0) -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI36_1) -; RV32IDZFH-NEXT: flw ft1, %lo(.LCPI36_1)(a0) -; RV32IDZFH-NEXT: fcvt.s.h ft2, fa0 -; RV32IDZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV32IDZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV32IDZFH-NEXT: fcvt.w.s a0, ft0, rtz -; RV32IDZFH-NEXT: feq.s a1, ft2, ft2 +; RV32IDZFH-NEXT: fcvt.s.h ft0, fa0 +; RV32IDZFH-NEXT: lui a0, 798720 +; RV32IDZFH-NEXT: fmv.w.x ft1, a0 +; RV32IDZFH-NEXT: fmax.s ft1, ft0, ft1 +; RV32IDZFH-NEXT: lui a0, 274400 +; RV32IDZFH-NEXT: fmv.w.x ft2, a0 +; RV32IDZFH-NEXT: fmin.s ft1, ft1, ft2 +; RV32IDZFH-NEXT: fcvt.w.s a0, ft1, rtz +; RV32IDZFH-NEXT: feq.s a1, ft0, ft0 ; RV32IDZFH-NEXT: seqz a1, a1 ; RV32IDZFH-NEXT: addi a1, a1, -1 ; RV32IDZFH-NEXT: and a0, a1, a0 @@ -3297,15 +3297,15 @@ ; ; RV64IDZFH-LABEL: fcvt_w_s_sat_i8: ; RV64IDZFH: # %bb.0: # %start -; RV64IDZFH-NEXT: lui a0, %hi(.LCPI36_0) -; RV64IDZFH-NEXT: flw ft0, %lo(.LCPI36_0)(a0) -; RV64IDZFH-NEXT: lui a0, %hi(.LCPI36_1) -; RV64IDZFH-NEXT: flw ft1, %lo(.LCPI36_1)(a0) -; RV64IDZFH-NEXT: fcvt.s.h ft2, fa0 -; RV64IDZFH-NEXT: fmax.s ft0, ft2, ft0 -; RV64IDZFH-NEXT: fmin.s ft0, ft0, ft1 -; RV64IDZFH-NEXT: fcvt.l.s a0, ft0, rtz -; RV64IDZFH-NEXT: feq.s a1, ft2, ft2 +; RV64IDZFH-NEXT: fcvt.s.h ft0, fa0 +; RV64IDZFH-NEXT: lui a0, 798720 +; RV64IDZFH-NEXT: fmv.w.x ft1, a0 +; RV64IDZFH-NEXT: fmax.s ft1, ft0, ft1 +; RV64IDZFH-NEXT: lui a0, 274400 +; RV64IDZFH-NEXT: fmv.w.x ft2, a0 +; RV64IDZFH-NEXT: fmin.s ft1, ft1, ft2 +; RV64IDZFH-NEXT: fcvt.l.s a0, ft1, rtz +; RV64IDZFH-NEXT: feq.s a1, ft0, ft0 ; RV64IDZFH-NEXT: seqz a1, a1 ; RV64IDZFH-NEXT: addi a1, a1, -1 ; RV64IDZFH-NEXT: and a0, a1, a0 @@ -3399,15 +3399,15 @@ ; ; CHECK32-IZFHMIN-LABEL: fcvt_w_s_sat_i8: ; CHECK32-IZFHMIN: # %bb.0: # %start -; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI36_0) -; CHECK32-IZFHMIN-NEXT: flw ft0, %lo(.LCPI36_0)(a0) -; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI36_1) -; CHECK32-IZFHMIN-NEXT: flw ft1, %lo(.LCPI36_1)(a0) -; CHECK32-IZFHMIN-NEXT: fcvt.s.h ft2, fa0 -; CHECK32-IZFHMIN-NEXT: fmax.s ft0, ft2, ft0 -; CHECK32-IZFHMIN-NEXT: fmin.s ft0, ft0, ft1 -; CHECK32-IZFHMIN-NEXT: fcvt.w.s a0, ft0, rtz -; CHECK32-IZFHMIN-NEXT: feq.s a1, ft2, ft2 +; CHECK32-IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECK32-IZFHMIN-NEXT: lui a0, 798720 +; CHECK32-IZFHMIN-NEXT: fmv.w.x ft1, a0 +; CHECK32-IZFHMIN-NEXT: fmax.s ft1, ft0, ft1 +; CHECK32-IZFHMIN-NEXT: lui a0, 274400 +; CHECK32-IZFHMIN-NEXT: fmv.w.x ft2, a0 +; CHECK32-IZFHMIN-NEXT: fmin.s ft1, ft1, ft2 +; CHECK32-IZFHMIN-NEXT: fcvt.w.s a0, ft1, rtz +; 
CHECK32-IZFHMIN-NEXT: feq.s a1, ft0, ft0 ; CHECK32-IZFHMIN-NEXT: seqz a1, a1 ; CHECK32-IZFHMIN-NEXT: addi a1, a1, -1 ; CHECK32-IZFHMIN-NEXT: and a0, a1, a0 @@ -3415,15 +3415,15 @@ ; ; CHECK64-IZFHMIN-LABEL: fcvt_w_s_sat_i8: ; CHECK64-IZFHMIN: # %bb.0: # %start -; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI36_0) -; CHECK64-IZFHMIN-NEXT: flw ft0, %lo(.LCPI36_0)(a0) -; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI36_1) -; CHECK64-IZFHMIN-NEXT: flw ft1, %lo(.LCPI36_1)(a0) -; CHECK64-IZFHMIN-NEXT: fcvt.s.h ft2, fa0 -; CHECK64-IZFHMIN-NEXT: fmax.s ft0, ft2, ft0 -; CHECK64-IZFHMIN-NEXT: fmin.s ft0, ft0, ft1 -; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, ft0, rtz -; CHECK64-IZFHMIN-NEXT: feq.s a1, ft2, ft2 +; CHECK64-IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECK64-IZFHMIN-NEXT: lui a0, 798720 +; CHECK64-IZFHMIN-NEXT: fmv.w.x ft1, a0 +; CHECK64-IZFHMIN-NEXT: fmax.s ft1, ft0, ft1 +; CHECK64-IZFHMIN-NEXT: lui a0, 274400 +; CHECK64-IZFHMIN-NEXT: fmv.w.x ft2, a0 +; CHECK64-IZFHMIN-NEXT: fmin.s ft1, ft1, ft2 +; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, ft1, rtz +; CHECK64-IZFHMIN-NEXT: feq.s a1, ft0, ft0 ; CHECK64-IZFHMIN-NEXT: seqz a1, a1 ; CHECK64-IZFHMIN-NEXT: addi a1, a1, -1 ; CHECK64-IZFHMIN-NEXT: and a0, a1, a0 @@ -3497,45 +3497,45 @@ define zeroext i8 @fcvt_wu_s_sat_i8(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_wu_s_sat_i8: ; RV32IZFH: # %bb.0: # %start -; RV32IZFH-NEXT: lui a0, %hi(.LCPI38_0) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI38_0)(a0) -; RV32IZFH-NEXT: fcvt.s.h ft1, fa0 -; RV32IZFH-NEXT: fmv.w.x ft2, zero -; RV32IZFH-NEXT: fmax.s ft1, ft1, ft2 -; RV32IZFH-NEXT: fmin.s ft0, ft1, ft0 +; RV32IZFH-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFH-NEXT: fmv.w.x ft1, zero +; RV32IZFH-NEXT: fmax.s ft0, ft0, ft1 +; RV32IZFH-NEXT: lui a0, 276464 +; RV32IZFH-NEXT: fmv.w.x ft1, a0 +; RV32IZFH-NEXT: fmin.s ft0, ft0, ft1 ; RV32IZFH-NEXT: fcvt.wu.s a0, ft0, rtz ; RV32IZFH-NEXT: ret ; ; RV64IZFH-LABEL: fcvt_wu_s_sat_i8: ; RV64IZFH: # %bb.0: # %start -; RV64IZFH-NEXT: lui a0, %hi(.LCPI38_0) -; RV64IZFH-NEXT: flw ft0, %lo(.LCPI38_0)(a0) -; RV64IZFH-NEXT: fcvt.s.h ft1, fa0 -; RV64IZFH-NEXT: fmv.w.x ft2, zero -; RV64IZFH-NEXT: fmax.s ft1, ft1, ft2 -; RV64IZFH-NEXT: fmin.s ft0, ft1, ft0 +; RV64IZFH-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFH-NEXT: fmv.w.x ft1, zero +; RV64IZFH-NEXT: fmax.s ft0, ft0, ft1 +; RV64IZFH-NEXT: lui a0, 276464 +; RV64IZFH-NEXT: fmv.w.x ft1, a0 +; RV64IZFH-NEXT: fmin.s ft0, ft0, ft1 ; RV64IZFH-NEXT: fcvt.lu.s a0, ft0, rtz ; RV64IZFH-NEXT: ret ; ; RV32IDZFH-LABEL: fcvt_wu_s_sat_i8: ; RV32IDZFH: # %bb.0: # %start -; RV32IDZFH-NEXT: lui a0, %hi(.LCPI38_0) -; RV32IDZFH-NEXT: flw ft0, %lo(.LCPI38_0)(a0) -; RV32IDZFH-NEXT: fcvt.s.h ft1, fa0 -; RV32IDZFH-NEXT: fmv.w.x ft2, zero -; RV32IDZFH-NEXT: fmax.s ft1, ft1, ft2 -; RV32IDZFH-NEXT: fmin.s ft0, ft1, ft0 +; RV32IDZFH-NEXT: fcvt.s.h ft0, fa0 +; RV32IDZFH-NEXT: fmv.w.x ft1, zero +; RV32IDZFH-NEXT: fmax.s ft0, ft0, ft1 +; RV32IDZFH-NEXT: lui a0, 276464 +; RV32IDZFH-NEXT: fmv.w.x ft1, a0 +; RV32IDZFH-NEXT: fmin.s ft0, ft0, ft1 ; RV32IDZFH-NEXT: fcvt.wu.s a0, ft0, rtz ; RV32IDZFH-NEXT: ret ; ; RV64IDZFH-LABEL: fcvt_wu_s_sat_i8: ; RV64IDZFH: # %bb.0: # %start -; RV64IDZFH-NEXT: lui a0, %hi(.LCPI38_0) -; RV64IDZFH-NEXT: flw ft0, %lo(.LCPI38_0)(a0) -; RV64IDZFH-NEXT: fcvt.s.h ft1, fa0 -; RV64IDZFH-NEXT: fmv.w.x ft2, zero -; RV64IDZFH-NEXT: fmax.s ft1, ft1, ft2 -; RV64IDZFH-NEXT: fmin.s ft0, ft1, ft0 +; RV64IDZFH-NEXT: fcvt.s.h ft0, fa0 +; RV64IDZFH-NEXT: fmv.w.x ft1, zero +; RV64IDZFH-NEXT: fmax.s ft0, ft0, ft1 +; RV64IDZFH-NEXT: lui a0, 276464 +; RV64IDZFH-NEXT: fmv.w.x ft1, a0 +; RV64IDZFH-NEXT: 
fmin.s ft0, ft0, ft1 ; RV64IDZFH-NEXT: fcvt.lu.s a0, ft0, rtz ; RV64IDZFH-NEXT: ret ; @@ -3615,23 +3615,23 @@ ; ; CHECK32-IZFHMIN-LABEL: fcvt_wu_s_sat_i8: ; CHECK32-IZFHMIN: # %bb.0: # %start -; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI38_0) -; CHECK32-IZFHMIN-NEXT: flw ft0, %lo(.LCPI38_0)(a0) -; CHECK32-IZFHMIN-NEXT: fcvt.s.h ft1, fa0 -; CHECK32-IZFHMIN-NEXT: fmv.w.x ft2, zero -; CHECK32-IZFHMIN-NEXT: fmax.s ft1, ft1, ft2 -; CHECK32-IZFHMIN-NEXT: fmin.s ft0, ft1, ft0 +; CHECK32-IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECK32-IZFHMIN-NEXT: fmv.w.x ft1, zero +; CHECK32-IZFHMIN-NEXT: fmax.s ft0, ft0, ft1 +; CHECK32-IZFHMIN-NEXT: lui a0, 276464 +; CHECK32-IZFHMIN-NEXT: fmv.w.x ft1, a0 +; CHECK32-IZFHMIN-NEXT: fmin.s ft0, ft0, ft1 ; CHECK32-IZFHMIN-NEXT: fcvt.wu.s a0, ft0, rtz ; CHECK32-IZFHMIN-NEXT: ret ; ; CHECK64-IZFHMIN-LABEL: fcvt_wu_s_sat_i8: ; CHECK64-IZFHMIN: # %bb.0: # %start -; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI38_0) -; CHECK64-IZFHMIN-NEXT: flw ft0, %lo(.LCPI38_0)(a0) -; CHECK64-IZFHMIN-NEXT: fcvt.s.h ft1, fa0 -; CHECK64-IZFHMIN-NEXT: fmv.w.x ft2, zero -; CHECK64-IZFHMIN-NEXT: fmax.s ft1, ft1, ft2 -; CHECK64-IZFHMIN-NEXT: fmin.s ft0, ft1, ft0 +; CHECK64-IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECK64-IZFHMIN-NEXT: fmv.w.x ft1, zero +; CHECK64-IZFHMIN-NEXT: fmax.s ft0, ft0, ft1 +; CHECK64-IZFHMIN-NEXT: lui a0, 276464 +; CHECK64-IZFHMIN-NEXT: fmv.w.x ft1, a0 +; CHECK64-IZFHMIN-NEXT: fmin.s ft0, ft0, ft1 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, ft0, rtz ; CHECK64-IZFHMIN-NEXT: ret start: diff --git a/llvm/test/CodeGen/RISCV/half-imm.ll b/llvm/test/CodeGen/RISCV/half-imm.ll --- a/llvm/test/CodeGen/RISCV/half-imm.ll +++ b/llvm/test/CodeGen/RISCV/half-imm.ll @@ -34,10 +34,10 @@ ; ; CHECKIZFHMIN-LABEL: half_imm_op: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI1_0) -; CHECKIZFHMIN-NEXT: flw ft0, %lo(.LCPI1_0)(a0) -; CHECKIZFHMIN-NEXT: fcvt.s.h ft1, fa0 -; CHECKIZFHMIN-NEXT: fadd.s ft0, ft1, ft0 +; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 260096 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 +; CHECKIZFHMIN-NEXT: fadd.s ft0, ft0, ft1 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, ft0 ; CHECKIZFHMIN-NEXT: ret %1 = fadd half %a, 1.0 diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll @@ -1526,9 +1526,9 @@ ; ; CHECKIZFHMIN-LABEL: floor_f16: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI17_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI17_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB17_2 @@ -1588,9 +1588,9 @@ ; ; CHECKIZFHMIN-LABEL: ceil_f16: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI18_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI18_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB18_2 @@ -1650,9 +1650,9 @@ ; ; CHECKIZFHMIN-LABEL: trunc_f16: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI19_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI19_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, 
ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB19_2 @@ -1712,9 +1712,9 @@ ; ; CHECKIZFHMIN-LABEL: rint_f16: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI20_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI20_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB20_2 @@ -1850,9 +1850,9 @@ ; ; CHECKIZFHMIN-LABEL: round_f16: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI22_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI22_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB22_2 @@ -1912,9 +1912,9 @@ ; ; CHECKIZFHMIN-LABEL: roundeven_f16: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI23_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI23_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB23_2 diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll --- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll @@ -20,9 +20,9 @@ ; ; CHECKIZFHMIN-LABEL: test_floor_si32: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI0_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI0_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB0_2 @@ -61,9 +61,9 @@ ; RV32IZFH-NEXT: fcvt.h.w ft0, a0, rdn ; RV32IZFH-NEXT: fsgnj.h fa0, ft0, fa0 ; RV32IZFH-NEXT: .LBB1_2: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI1_1) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI1_1)(a0) ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IZFH-NEXT: lui a0, 913408 +; RV32IZFH-NEXT: fmv.w.x ft0, a0 ; RV32IZFH-NEXT: fle.s s0, ft0, fs0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixsfdi@plt @@ -72,8 +72,8 @@ ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: lui a1, 524288 ; RV32IZFH-NEXT: .LBB1_4: -; RV32IZFH-NEXT: lui a2, %hi(.LCPI1_2) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI1_2)(a2) +; RV32IZFH-NEXT: lui a2, %hi(.LCPI1_1) +; RV32IZFH-NEXT: flw ft0, %lo(.LCPI1_1)(a2) ; RV32IZFH-NEXT: flt.s a2, ft0, fs0 ; RV32IZFH-NEXT: beqz a2, .LBB1_6 ; RV32IZFH-NEXT: # %bb.5: @@ -105,13 +105,9 @@ ; ; RV32IZFHMIN-LABEL: test_floor_si64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI1_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI1_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB1_2 @@ -120,11 +116,15 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rdn ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB1_2: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI1_1) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI1_1)(a0) +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; 
RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s ft0, ft0 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, ft0 -; RV32IZFHMIN-NEXT: fle.s s0, ft1, fs0 +; RV32IZFHMIN-NEXT: lui a0, 913408 +; RV32IZFHMIN-NEXT: fmv.w.x ft0, a0 +; RV32IZFHMIN-NEXT: fle.s s0, ft0, fs0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixsfdi@plt ; RV32IZFHMIN-NEXT: lui a3, 524288 @@ -132,8 +132,8 @@ ; RV32IZFHMIN-NEXT: # %bb.3: ; RV32IZFHMIN-NEXT: lui a1, 524288 ; RV32IZFHMIN-NEXT: .LBB1_4: -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI1_2) -; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI1_2)(a2) +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI1_0) +; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI1_0)(a2) ; RV32IZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IZFHMIN-NEXT: beqz a2, .LBB1_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -156,9 +156,9 @@ ; ; RV64IZFHMIN-LABEL: test_floor_si64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI1_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI1_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB1_2 @@ -192,9 +192,9 @@ ; ; RV32IZFHMIN-LABEL: test_floor_ui32: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI2_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI2_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB2_2 @@ -214,9 +214,9 @@ ; ; RV64IZFHMIN-LABEL: test_floor_ui32: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI2_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI2_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB2_2 @@ -286,13 +286,9 @@ ; ; RV32IZFHMIN-LABEL: test_floor_ui64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI3_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI3_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB3_2 @@ -301,6 +297,10 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rdn ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB3_2: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s ft0, ft0 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, ft0 ; RV32IZFHMIN-NEXT: fmv.w.x ft0, zero @@ -308,8 +308,8 @@ ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi@plt -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI3_1) -; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI3_1)(a2) +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI3_0) +; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI3_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 ; RV32IZFHMIN-NEXT: flt.s a2, ft0, fs0 ; 
RV32IZFHMIN-NEXT: neg a2, a2 @@ -324,9 +324,9 @@ ; ; RV64IZFHMIN-LABEL: test_floor_ui64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI3_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI3_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB3_2 @@ -360,9 +360,9 @@ ; ; CHECKIZFHMIN-LABEL: test_ceil_si32: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI4_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI4_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB4_2 @@ -401,9 +401,9 @@ ; RV32IZFH-NEXT: fcvt.h.w ft0, a0, rup ; RV32IZFH-NEXT: fsgnj.h fa0, ft0, fa0 ; RV32IZFH-NEXT: .LBB5_2: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI5_1) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI5_1)(a0) ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IZFH-NEXT: lui a0, 913408 +; RV32IZFH-NEXT: fmv.w.x ft0, a0 ; RV32IZFH-NEXT: fle.s s0, ft0, fs0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixsfdi@plt @@ -412,8 +412,8 @@ ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: lui a1, 524288 ; RV32IZFH-NEXT: .LBB5_4: -; RV32IZFH-NEXT: lui a2, %hi(.LCPI5_2) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI5_2)(a2) +; RV32IZFH-NEXT: lui a2, %hi(.LCPI5_1) +; RV32IZFH-NEXT: flw ft0, %lo(.LCPI5_1)(a2) ; RV32IZFH-NEXT: flt.s a2, ft0, fs0 ; RV32IZFH-NEXT: beqz a2, .LBB5_6 ; RV32IZFH-NEXT: # %bb.5: @@ -445,13 +445,9 @@ ; ; RV32IZFHMIN-LABEL: test_ceil_si64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI5_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI5_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB5_2 @@ -460,11 +456,15 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rup ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB5_2: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI5_1) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI5_1)(a0) +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s ft0, ft0 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, ft0 -; RV32IZFHMIN-NEXT: fle.s s0, ft1, fs0 +; RV32IZFHMIN-NEXT: lui a0, 913408 +; RV32IZFHMIN-NEXT: fmv.w.x ft0, a0 +; RV32IZFHMIN-NEXT: fle.s s0, ft0, fs0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixsfdi@plt ; RV32IZFHMIN-NEXT: lui a3, 524288 @@ -472,8 +472,8 @@ ; RV32IZFHMIN-NEXT: # %bb.3: ; RV32IZFHMIN-NEXT: lui a1, 524288 ; RV32IZFHMIN-NEXT: .LBB5_4: -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI5_2) -; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI5_2)(a2) +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI5_0) +; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI5_0)(a2) ; RV32IZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IZFHMIN-NEXT: beqz a2, .LBB5_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -496,9 +496,9 @@ ; ; RV64IZFHMIN-LABEL: test_ceil_si64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI5_0) -; RV64IZFHMIN-NEXT: flw ft1, 
%lo(.LCPI5_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB5_2 @@ -532,9 +532,9 @@ ; ; RV32IZFHMIN-LABEL: test_ceil_ui32: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI6_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI6_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB6_2 @@ -554,9 +554,9 @@ ; ; RV64IZFHMIN-LABEL: test_ceil_ui32: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI6_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI6_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB6_2 @@ -626,13 +626,9 @@ ; ; RV32IZFHMIN-LABEL: test_ceil_ui64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI7_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI7_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB7_2 @@ -641,6 +637,10 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rup ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB7_2: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s ft0, ft0 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, ft0 ; RV32IZFHMIN-NEXT: fmv.w.x ft0, zero @@ -648,8 +648,8 @@ ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi@plt -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI7_1) -; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI7_1)(a2) +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI7_0) +; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI7_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 ; RV32IZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 @@ -664,9 +664,9 @@ ; ; RV64IZFHMIN-LABEL: test_ceil_ui64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI7_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI7_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB7_2 @@ -700,9 +700,9 @@ ; ; CHECKIZFHMIN-LABEL: test_trunc_si32: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI8_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI8_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB8_2 @@ -741,9 +741,9 @@ ; RV32IZFH-NEXT: fcvt.h.w ft0, a0, rtz ; RV32IZFH-NEXT: fsgnj.h fa0, ft0, fa0 ; RV32IZFH-NEXT: .LBB9_2: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI9_1) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI9_1)(a0) ; RV32IZFH-NEXT: fcvt.s.h 
fs0, fa0 +; RV32IZFH-NEXT: lui a0, 913408 +; RV32IZFH-NEXT: fmv.w.x ft0, a0 ; RV32IZFH-NEXT: fle.s s0, ft0, fs0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixsfdi@plt @@ -752,8 +752,8 @@ ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: lui a1, 524288 ; RV32IZFH-NEXT: .LBB9_4: -; RV32IZFH-NEXT: lui a2, %hi(.LCPI9_2) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI9_2)(a2) +; RV32IZFH-NEXT: lui a2, %hi(.LCPI9_1) +; RV32IZFH-NEXT: flw ft0, %lo(.LCPI9_1)(a2) ; RV32IZFH-NEXT: flt.s a2, ft0, fs0 ; RV32IZFH-NEXT: beqz a2, .LBB9_6 ; RV32IZFH-NEXT: # %bb.5: @@ -785,13 +785,9 @@ ; ; RV32IZFHMIN-LABEL: test_trunc_si64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI9_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI9_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB9_2 @@ -800,11 +796,15 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rtz ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB9_2: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI9_1) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI9_1)(a0) +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s ft0, ft0 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, ft0 -; RV32IZFHMIN-NEXT: fle.s s0, ft1, fs0 +; RV32IZFHMIN-NEXT: lui a0, 913408 +; RV32IZFHMIN-NEXT: fmv.w.x ft0, a0 +; RV32IZFHMIN-NEXT: fle.s s0, ft0, fs0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixsfdi@plt ; RV32IZFHMIN-NEXT: lui a3, 524288 @@ -812,8 +812,8 @@ ; RV32IZFHMIN-NEXT: # %bb.3: ; RV32IZFHMIN-NEXT: lui a1, 524288 ; RV32IZFHMIN-NEXT: .LBB9_4: -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI9_2) -; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI9_2)(a2) +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI9_0) +; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI9_0)(a2) ; RV32IZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IZFHMIN-NEXT: beqz a2, .LBB9_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -836,9 +836,9 @@ ; ; RV64IZFHMIN-LABEL: test_trunc_si64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI9_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI9_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB9_2 @@ -872,9 +872,9 @@ ; ; RV32IZFHMIN-LABEL: test_trunc_ui32: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI10_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI10_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB10_2 @@ -894,9 +894,9 @@ ; ; RV64IZFHMIN-LABEL: test_trunc_ui32: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI10_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI10_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB10_2 @@ -966,13 +966,9 @@ ; 
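The same two immediates recur throughout these hunks: `lui a0, 307200` materializes the 2^23 rounding threshold that the `fabs.s`/`flt.s` pair tests against, and `lui a0, 913408` materializes the -2^63 bound used when saturating to i64. This works because `lui` writes its 20-bit immediate shifted left by 12 into the integer register, and `fmv.w.x` moves those 32 bits into the FP register unchanged; each of these constants has all-zero low 12 bits, so a single `lui` reproduces the full bit pattern. A minimal C++ sketch of that decoding (the helper name `decodeLuiFmv` is illustrative, not part of the patch):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret the 32-bit pattern produced by `lui aN, imm` followed by
// `fmv.w.x fN, aN`: lui places imm << 12 in the integer register, and
// fmv.w.x moves those bits into the FP register without conversion.
static float decodeLuiFmv(uint32_t luiImm) {
  uint32_t bits = luiImm << 12; // the constant's low 12 bits are zero
  float f;
  std::memcpy(&f, &bits, sizeof f); // bit-level reinterpretation, like fmv.w.x
  return f;
}

int main() {
  // 307200 << 12 == 0x4B000000 == 2^23: magnitudes at or above this are
  // already integral, so the rounding idiom skips the convert/sign-inject.
  std::printf("%.1f\n", decodeLuiFmv(307200)); // 8388608.0
  // 913408 << 12 == 0xDF000000 == -2^63: the lower bound for i64 saturation.
  std::printf("%.1f\n", decodeLuiFmv(913408)); // -9223372036854775808.0
  return 0;
}

The other immediates in this section decode the same way: 815104 gives -32768.0 and 798720 gives -128.0 (the i16 and i8 lower bounds), while 274400 gives 127.0 and 276464 gives 255.0 (the i8 and u8 upper bounds).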
; RV32IZFHMIN-LABEL: test_trunc_ui64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI11_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI11_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB11_2 @@ -981,6 +977,10 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rtz ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB11_2: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s ft0, ft0 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, ft0 ; RV32IZFHMIN-NEXT: fmv.w.x ft0, zero @@ -988,8 +988,8 @@ ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi@plt -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI11_1) -; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI11_1)(a2) +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI11_0) +; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI11_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 ; RV32IZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 @@ -1004,9 +1004,9 @@ ; ; RV64IZFHMIN-LABEL: test_trunc_ui64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI11_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI11_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB11_2 @@ -1040,9 +1040,9 @@ ; ; CHECKIZFHMIN-LABEL: test_round_si32: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI12_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI12_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB12_2 @@ -1081,9 +1081,9 @@ ; RV32IZFH-NEXT: fcvt.h.w ft0, a0, rmm ; RV32IZFH-NEXT: fsgnj.h fa0, ft0, fa0 ; RV32IZFH-NEXT: .LBB13_2: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI13_1) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI13_1)(a0) ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IZFH-NEXT: lui a0, 913408 +; RV32IZFH-NEXT: fmv.w.x ft0, a0 ; RV32IZFH-NEXT: fle.s s0, ft0, fs0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixsfdi@plt @@ -1092,8 +1092,8 @@ ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: lui a1, 524288 ; RV32IZFH-NEXT: .LBB13_4: -; RV32IZFH-NEXT: lui a2, %hi(.LCPI13_2) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI13_2)(a2) +; RV32IZFH-NEXT: lui a2, %hi(.LCPI13_1) +; RV32IZFH-NEXT: flw ft0, %lo(.LCPI13_1)(a2) ; RV32IZFH-NEXT: flt.s a2, ft0, fs0 ; RV32IZFH-NEXT: beqz a2, .LBB13_6 ; RV32IZFH-NEXT: # %bb.5: @@ -1125,13 +1125,9 @@ ; ; RV32IZFHMIN-LABEL: test_round_si64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI13_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI13_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; 
RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB13_2 @@ -1140,11 +1136,15 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rmm ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB13_2: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI13_1) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI13_1)(a0) +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s ft0, ft0 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, ft0 -; RV32IZFHMIN-NEXT: fle.s s0, ft1, fs0 +; RV32IZFHMIN-NEXT: lui a0, 913408 +; RV32IZFHMIN-NEXT: fmv.w.x ft0, a0 +; RV32IZFHMIN-NEXT: fle.s s0, ft0, fs0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixsfdi@plt ; RV32IZFHMIN-NEXT: lui a3, 524288 @@ -1152,8 +1152,8 @@ ; RV32IZFHMIN-NEXT: # %bb.3: ; RV32IZFHMIN-NEXT: lui a1, 524288 ; RV32IZFHMIN-NEXT: .LBB13_4: -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI13_2) -; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI13_2)(a2) +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI13_0) +; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI13_0)(a2) ; RV32IZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IZFHMIN-NEXT: beqz a2, .LBB13_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -1176,9 +1176,9 @@ ; ; RV64IZFHMIN-LABEL: test_round_si64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI13_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI13_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB13_2 @@ -1212,9 +1212,9 @@ ; ; RV32IZFHMIN-LABEL: test_round_ui32: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI14_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI14_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB14_2 @@ -1234,9 +1234,9 @@ ; ; RV64IZFHMIN-LABEL: test_round_ui32: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI14_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI14_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB14_2 @@ -1306,13 +1306,9 @@ ; ; RV32IZFHMIN-LABEL: test_round_ui64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI15_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI15_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB15_2 @@ -1321,6 +1317,10 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rmm ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB15_2: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s ft0, ft0 ; 
RV32IZFHMIN-NEXT: fcvt.s.h fs0, ft0 ; RV32IZFHMIN-NEXT: fmv.w.x ft0, zero @@ -1328,8 +1328,8 @@ ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi@plt -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI15_1) -; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI15_1)(a2) +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI15_0) +; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI15_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 ; RV32IZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 @@ -1344,9 +1344,9 @@ ; ; RV64IZFHMIN-LABEL: test_round_ui64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI15_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI15_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB15_2 @@ -1380,9 +1380,9 @@ ; ; CHECKIZFHMIN-LABEL: test_roundeven_si32: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI16_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI16_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB16_2 @@ -1421,9 +1421,9 @@ ; RV32IZFH-NEXT: fcvt.h.w ft0, a0, rne ; RV32IZFH-NEXT: fsgnj.h fa0, ft0, fa0 ; RV32IZFH-NEXT: .LBB17_2: -; RV32IZFH-NEXT: lui a0, %hi(.LCPI17_1) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI17_1)(a0) ; RV32IZFH-NEXT: fcvt.s.h fs0, fa0 +; RV32IZFH-NEXT: lui a0, 913408 +; RV32IZFH-NEXT: fmv.w.x ft0, a0 ; RV32IZFH-NEXT: fle.s s0, ft0, fs0 ; RV32IZFH-NEXT: fmv.s fa0, fs0 ; RV32IZFH-NEXT: call __fixsfdi@plt @@ -1432,8 +1432,8 @@ ; RV32IZFH-NEXT: # %bb.3: ; RV32IZFH-NEXT: lui a1, 524288 ; RV32IZFH-NEXT: .LBB17_4: -; RV32IZFH-NEXT: lui a2, %hi(.LCPI17_2) -; RV32IZFH-NEXT: flw ft0, %lo(.LCPI17_2)(a2) +; RV32IZFH-NEXT: lui a2, %hi(.LCPI17_1) +; RV32IZFH-NEXT: flw ft0, %lo(.LCPI17_1)(a2) ; RV32IZFH-NEXT: flt.s a2, ft0, fs0 ; RV32IZFH-NEXT: beqz a2, .LBB17_6 ; RV32IZFH-NEXT: # %bb.5: @@ -1465,13 +1465,9 @@ ; ; RV32IZFHMIN-LABEL: test_roundeven_si64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI17_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI17_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB17_2 @@ -1480,11 +1476,15 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rne ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB17_2: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI17_1) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI17_1)(a0) +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s ft0, ft0 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, ft0 -; RV32IZFHMIN-NEXT: fle.s s0, ft1, fs0 +; RV32IZFHMIN-NEXT: lui a0, 913408 +; RV32IZFHMIN-NEXT: fmv.w.x ft0, a0 +; RV32IZFHMIN-NEXT: fle.s s0, ft0, fs0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixsfdi@plt ; RV32IZFHMIN-NEXT: lui a3, 524288 @@ -1492,8 +1492,8 @@ ; RV32IZFHMIN-NEXT: # 
%bb.3: ; RV32IZFHMIN-NEXT: lui a1, 524288 ; RV32IZFHMIN-NEXT: .LBB17_4: -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI17_2) -; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI17_2)(a2) +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI17_0) +; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI17_0)(a2) ; RV32IZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IZFHMIN-NEXT: beqz a2, .LBB17_6 ; RV32IZFHMIN-NEXT: # %bb.5: @@ -1516,9 +1516,9 @@ ; ; RV64IZFHMIN-LABEL: test_roundeven_si64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI17_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI17_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB17_2 @@ -1552,9 +1552,9 @@ ; ; RV32IZFHMIN-LABEL: test_roundeven_ui32: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI18_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI18_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB18_2 @@ -1574,9 +1574,9 @@ ; ; RV64IZFHMIN-LABEL: test_roundeven_ui32: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI18_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI18_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB18_2 @@ -1646,13 +1646,9 @@ ; ; RV32IZFHMIN-LABEL: test_roundeven_ui64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI19_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI19_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB19_2 @@ -1661,6 +1657,10 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rne ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB19_2: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill ; RV32IZFHMIN-NEXT: fcvt.h.s ft0, ft0 ; RV32IZFHMIN-NEXT: fcvt.s.h fs0, ft0 ; RV32IZFHMIN-NEXT: fmv.w.x ft0, zero @@ -1668,8 +1668,8 @@ ; RV32IZFHMIN-NEXT: neg s0, a0 ; RV32IZFHMIN-NEXT: fmv.s fa0, fs0 ; RV32IZFHMIN-NEXT: call __fixunssfdi@plt -; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI19_1) -; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI19_1)(a2) +; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI19_0) +; RV32IZFHMIN-NEXT: flw ft0, %lo(.LCPI19_0)(a2) ; RV32IZFHMIN-NEXT: and a0, s0, a0 ; RV32IZFHMIN-NEXT: flt.s a2, ft0, fs0 ; RV32IZFHMIN-NEXT: neg a2, a2 @@ -1684,9 +1684,9 @@ ; ; RV64IZFHMIN-LABEL: test_roundeven_ui64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI19_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI19_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB19_2 diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll 
b/llvm/test/CodeGen/RISCV/half-round-conv.ll --- a/llvm/test/CodeGen/RISCV/half-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll @@ -21,9 +21,9 @@ ; ; RV32IZFHMIN-LABEL: test_floor_si8: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI0_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI0_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB0_2 @@ -39,9 +39,9 @@ ; ; RV64IZFHMIN-LABEL: test_floor_si8: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI0_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI0_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB0_2 @@ -72,9 +72,9 @@ ; ; RV32IZFHMIN-LABEL: test_floor_si16: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI1_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI1_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB1_2 @@ -90,9 +90,9 @@ ; ; RV64IZFHMIN-LABEL: test_floor_si16: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI1_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI1_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB1_2 @@ -118,9 +118,9 @@ ; ; CHECKIZFHMIN-LABEL: test_floor_si32: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI2_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI2_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB2_2 @@ -167,13 +167,9 @@ ; ; RV32IZFHMIN-LABEL: test_floor_si64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: .cfi_offset ra, -4 -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI3_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI3_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB3_2 @@ -182,6 +178,10 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rdn ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB3_2: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: .cfi_offset ra, -4 ; RV32IZFHMIN-NEXT: fcvt.h.s fa0, ft0 ; RV32IZFHMIN-NEXT: call __fixhfdi@plt ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -190,9 +190,9 @@ ; ; RV64IZFHMIN-LABEL: test_floor_si64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI3_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI3_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, 
.LBB3_2 @@ -223,9 +223,9 @@ ; ; RV32IZFHMIN-LABEL: test_floor_ui8: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI4_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI4_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB4_2 @@ -241,9 +241,9 @@ ; ; RV64IZFHMIN-LABEL: test_floor_ui8: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI4_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI4_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB4_2 @@ -274,9 +274,9 @@ ; ; RV32IZFHMIN-LABEL: test_floor_ui16: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI5_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI5_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB5_2 @@ -292,9 +292,9 @@ ; ; RV64IZFHMIN-LABEL: test_floor_ui16: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI5_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI5_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB5_2 @@ -320,9 +320,9 @@ ; ; CHECKIZFHMIN-LABEL: test_floor_ui32: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI6_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI6_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB6_2 @@ -369,13 +369,9 @@ ; ; RV32IZFHMIN-LABEL: test_floor_ui64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: .cfi_offset ra, -4 -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI7_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI7_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB7_2 @@ -384,6 +380,10 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rdn ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB7_2: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: .cfi_offset ra, -4 ; RV32IZFHMIN-NEXT: fcvt.h.s fa0, ft0 ; RV32IZFHMIN-NEXT: call __fixunshfdi@plt ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -392,9 +392,9 @@ ; ; RV64IZFHMIN-LABEL: test_floor_ui64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI7_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI7_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB7_2 @@ -425,9 +425,9 @@ ; ; RV32IZFHMIN-LABEL: test_ceil_si8: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, 
%hi(.LCPI8_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI8_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB8_2 @@ -443,9 +443,9 @@ ; ; RV64IZFHMIN-LABEL: test_ceil_si8: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI8_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI8_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB8_2 @@ -476,9 +476,9 @@ ; ; RV32IZFHMIN-LABEL: test_ceil_si16: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI9_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI9_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB9_2 @@ -494,9 +494,9 @@ ; ; RV64IZFHMIN-LABEL: test_ceil_si16: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI9_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI9_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB9_2 @@ -522,9 +522,9 @@ ; ; CHECKIZFHMIN-LABEL: test_ceil_si32: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI10_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI10_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB10_2 @@ -571,13 +571,9 @@ ; ; RV32IZFHMIN-LABEL: test_ceil_si64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: .cfi_offset ra, -4 -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI11_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI11_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB11_2 @@ -586,6 +582,10 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rup ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB11_2: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: .cfi_offset ra, -4 ; RV32IZFHMIN-NEXT: fcvt.h.s fa0, ft0 ; RV32IZFHMIN-NEXT: call __fixhfdi@plt ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -594,9 +594,9 @@ ; ; RV64IZFHMIN-LABEL: test_ceil_si64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI11_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI11_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB11_2 @@ -627,9 +627,9 @@ ; ; RV32IZFHMIN-LABEL: test_ceil_ui8: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI12_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI12_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 
307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB12_2 @@ -645,9 +645,9 @@ ; ; RV64IZFHMIN-LABEL: test_ceil_ui8: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI12_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI12_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB12_2 @@ -678,9 +678,9 @@ ; ; RV32IZFHMIN-LABEL: test_ceil_ui16: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI13_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI13_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB13_2 @@ -696,9 +696,9 @@ ; ; RV64IZFHMIN-LABEL: test_ceil_ui16: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI13_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI13_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB13_2 @@ -724,9 +724,9 @@ ; ; CHECKIZFHMIN-LABEL: test_ceil_ui32: ; CHECKIZFHMIN: # %bb.0: -; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI14_0) -; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI14_0)(a0) ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; CHECKIZFHMIN-NEXT: lui a0, 307200 +; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB14_2 @@ -773,13 +773,9 @@ ; ; RV32IZFHMIN-LABEL: test_ceil_ui64: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: addi sp, sp, -16 -; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16 -; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZFHMIN-NEXT: .cfi_offset ra, -4 -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI15_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI15_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV32IZFHMIN-NEXT: beqz a0, .LBB15_2 @@ -788,6 +784,10 @@ ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rup ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0 ; RV32IZFHMIN-NEXT: .LBB15_2: +; RV32IZFHMIN-NEXT: addi sp, sp, -16 +; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFHMIN-NEXT: .cfi_offset ra, -4 ; RV32IZFHMIN-NEXT: fcvt.h.s fa0, ft0 ; RV32IZFHMIN-NEXT: call __fixunshfdi@plt ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload @@ -796,9 +796,9 @@ ; ; RV64IZFHMIN-LABEL: test_ceil_ui64: ; RV64IZFHMIN: # %bb.0: -; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI15_0) -; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI15_0)(a0) ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV64IZFHMIN-NEXT: lui a0, 307200 +; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1 ; RV64IZFHMIN-NEXT: beqz a0, .LBB15_2 @@ -829,9 +829,9 @@ ; ; RV32IZFHMIN-LABEL: test_trunc_si8: ; RV32IZFHMIN: # %bb.0: -; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI16_0) -; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI16_0)(a0) ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0 +; RV32IZFHMIN-NEXT: lui a0, 307200 +; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, 
ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB16_2
@@ -847,9 +847,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_si8:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI16_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI16_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB16_2
@@ -880,9 +880,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_si16:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI17_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI17_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB17_2
@@ -898,9 +898,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_si16:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI17_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI17_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB17_2
@@ -926,9 +926,9 @@
 ;
 ; CHECKIZFHMIN-LABEL: test_trunc_si32:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI18_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB18_2
@@ -975,13 +975,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_si64:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: addi sp, sp, -16
-; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI19_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI19_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB19_2
@@ -990,6 +986,10 @@
 ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rtz
 ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0
 ; RV32IZFHMIN-NEXT: .LBB19_2:
+; RV32IZFHMIN-NEXT: addi sp, sp, -16
+; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT: fcvt.h.s fa0, ft0
 ; RV32IZFHMIN-NEXT: call __fixhfdi@plt
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -998,9 +998,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_si64:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI19_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI19_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB19_2
@@ -1031,9 +1031,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_ui8:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI20_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB20_2
@@ -1049,9 +1049,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_ui8:
 ;
RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI20_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB20_2
@@ -1082,9 +1082,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_ui16:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI21_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI21_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB21_2
@@ -1100,9 +1100,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_ui16:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI21_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI21_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB21_2
@@ -1128,9 +1128,9 @@
 ;
 ; CHECKIZFHMIN-LABEL: test_trunc_ui32:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI22_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI22_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB22_2
@@ -1177,13 +1177,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_trunc_ui64:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: addi sp, sp, -16
-; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI23_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI23_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB23_2
@@ -1192,6 +1188,10 @@
 ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rtz
 ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0
 ; RV32IZFHMIN-NEXT: .LBB23_2:
+; RV32IZFHMIN-NEXT: addi sp, sp, -16
+; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT: fcvt.h.s fa0, ft0
 ; RV32IZFHMIN-NEXT: call __fixunshfdi@plt
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1200,9 +1200,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_trunc_ui64:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI23_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI23_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB23_2
@@ -1233,9 +1233,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_round_si8:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI24_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI24_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB24_2
@@ -1251,9 +1251,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_round_si8:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI24_0)
-; RV64IZFHMIN-NEXT: flw
ft1, %lo(.LCPI24_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB24_2
@@ -1284,9 +1284,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_round_si16:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI25_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI25_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB25_2
@@ -1302,9 +1302,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_round_si16:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI25_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI25_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB25_2
@@ -1330,9 +1330,9 @@
 ;
 ; CHECKIZFHMIN-LABEL: test_round_si32:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI26_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI26_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB26_2
@@ -1379,13 +1379,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_round_si64:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: addi sp, sp, -16
-; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI27_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI27_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB27_2
@@ -1394,6 +1390,10 @@
 ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rmm
 ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0
 ; RV32IZFHMIN-NEXT: .LBB27_2:
+; RV32IZFHMIN-NEXT: addi sp, sp, -16
+; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT: fcvt.h.s fa0, ft0
 ; RV32IZFHMIN-NEXT: call __fixhfdi@plt
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1402,9 +1402,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_round_si64:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI27_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI27_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB27_2
@@ -1435,9 +1435,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_round_ui8:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI28_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI28_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB28_2
@@ -1453,9 +1453,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_round_ui8:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI28_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI28_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0,
307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB28_2
@@ -1486,9 +1486,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_round_ui16:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI29_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI29_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB29_2
@@ -1504,9 +1504,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_round_ui16:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI29_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI29_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB29_2
@@ -1532,9 +1532,9 @@
 ;
 ; CHECKIZFHMIN-LABEL: test_round_ui32:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI30_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI30_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB30_2
@@ -1581,13 +1581,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_round_ui64:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: addi sp, sp, -16
-; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI31_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI31_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB31_2
@@ -1596,6 +1592,10 @@
 ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rmm
 ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0
 ; RV32IZFHMIN-NEXT: .LBB31_2:
+; RV32IZFHMIN-NEXT: addi sp, sp, -16
+; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT: fcvt.h.s fa0, ft0
 ; RV32IZFHMIN-NEXT: call __fixunshfdi@plt
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1604,9 +1604,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_round_ui64:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI31_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI31_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB31_2
@@ -1637,9 +1637,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_si8:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI32_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI32_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB32_2
@@ -1655,9 +1655,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_si8:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI32_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI32_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ;
RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB32_2
@@ -1688,9 +1688,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_si16:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI33_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI33_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB33_2
@@ -1706,9 +1706,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_si16:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI33_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI33_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB33_2
@@ -1734,9 +1734,9 @@
 ;
 ; CHECKIZFHMIN-LABEL: test_roundeven_si32:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI34_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI34_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB34_2
@@ -1783,13 +1783,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_si64:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: addi sp, sp, -16
-; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI35_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI35_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB35_2
@@ -1798,6 +1794,10 @@
 ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rne
 ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0
 ; RV32IZFHMIN-NEXT: .LBB35_2:
+; RV32IZFHMIN-NEXT: addi sp, sp, -16
+; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT: fcvt.h.s fa0, ft0
 ; RV32IZFHMIN-NEXT: call __fixhfdi@plt
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -1806,9 +1806,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_si64:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI35_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI35_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB35_2
@@ -1839,9 +1839,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_ui8:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI36_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI36_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB36_2
@@ -1857,9 +1857,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_ui8:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI36_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI36_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0,
.LBB36_2
@@ -1890,9 +1890,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_ui16:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI37_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI37_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB37_2
@@ -1908,9 +1908,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_ui16:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI37_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI37_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB37_2
@@ -1936,9 +1936,9 @@
 ;
 ; CHECKIZFHMIN-LABEL: test_roundeven_ui32:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI38_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI38_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB38_2
@@ -1985,13 +1985,9 @@
 ;
 ; RV32IZFHMIN-LABEL: test_roundeven_ui64:
 ; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: addi sp, sp, -16
-; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
-; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI39_0)
-; RV32IZFHMIN-NEXT: flw ft1, %lo(.LCPI39_0)(a0)
 ; RV32IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV32IZFHMIN-NEXT: lui a0, 307200
+; RV32IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV32IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV32IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV32IZFHMIN-NEXT: beqz a0, .LBB39_2
@@ -2000,6 +1996,10 @@
 ; RV32IZFHMIN-NEXT: fcvt.s.w ft1, a0, rne
 ; RV32IZFHMIN-NEXT: fsgnj.s ft0, ft1, ft0
 ; RV32IZFHMIN-NEXT: .LBB39_2:
+; RV32IZFHMIN-NEXT: addi sp, sp, -16
+; RV32IZFHMIN-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFHMIN-NEXT: .cfi_offset ra, -4
 ; RV32IZFHMIN-NEXT: fcvt.h.s fa0, ft0
 ; RV32IZFHMIN-NEXT: call __fixunshfdi@plt
 ; RV32IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -2008,9 +2008,9 @@
 ;
 ; RV64IZFHMIN-LABEL: test_roundeven_ui64:
 ; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI39_0)
-; RV64IZFHMIN-NEXT: flw ft1, %lo(.LCPI39_0)(a0)
 ; RV64IZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; RV64IZFHMIN-NEXT: lui a0, 307200
+; RV64IZFHMIN-NEXT: fmv.w.x ft1, a0
 ; RV64IZFHMIN-NEXT: fabs.s ft2, ft0
 ; RV64IZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; RV64IZFHMIN-NEXT: beqz a0, .LBB39_2
@@ -2066,9 +2066,9 @@
 ;
 ; CHECKIZFHMIN-LABEL: test_floor_half:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI40_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB40_2
@@ -2121,9 +2121,9 @@
 ;
 ; CHECKIZFHMIN-LABEL: test_ceil_half:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI41_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI41_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB41_2
@@ -2176,9 +2176,9 @@
 ;
 ;
CHECKIZFHMIN-LABEL: test_trunc_half:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI42_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB42_2
@@ -2231,9 +2231,9 @@
 ;
 ; CHECKIZFHMIN-LABEL: test_round_half:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI43_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI43_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB43_2
@@ -2286,9 +2286,9 @@
 ;
 ; CHECKIZFHMIN-LABEL: test_roundeven_half:
 ; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI44_0)
-; CHECKIZFHMIN-NEXT: flw ft1, %lo(.LCPI44_0)(a0)
 ; CHECKIZFHMIN-NEXT: fcvt.s.h ft0, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 307200
+; CHECKIZFHMIN-NEXT: fmv.w.x ft1, a0
 ; CHECKIZFHMIN-NEXT: fabs.s ft2, ft0
 ; CHECKIZFHMIN-NEXT: flt.s a0, ft2, ft1
 ; CHECKIZFHMIN-NEXT: beqz a0, .LBB44_2
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -279,10 +279,10 @@ define <vscale x 1 x float> @vp_ceil_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -300,10 +300,10 @@ define <vscale x 1 x float> @vp_ceil_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv1f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 3
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -323,10 +323,10 @@ define <vscale x 2 x float> @vp_ceil_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -344,10 +344,10 @@ define <vscale x 2 x float> @vp_ceil_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv2f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 3
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -368,10 +368,10 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI16_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+;
CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -390,10 +390,10 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv4f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v10, ft0
 ; CHECK-NEXT: fsrmi a0, 3
 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -414,10 +414,10 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -436,10 +436,10 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv8f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v12, ft0
 ; CHECK-NEXT: fsrmi a0, 3
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -460,10 +460,10 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -482,10 +482,10 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv16f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v16, ft0
 ; CHECK-NEXT: fsrmi a0, 3
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -516,12 +516,12 @@ define float @extractelt_fadd_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: extractelt_fadd_nxv4f32_splat:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI47_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI47_0)(a0)
 ; CHECK-NEXT: vsetivli
zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vfmv.f.s ft1, v8
-; CHECK-NEXT: fadd.s fa0, ft1, ft0
+; CHECK-NEXT: vfmv.f.s ft0, v8
+; CHECK-NEXT: lui a0, 263168
+; CHECK-NEXT: fmv.w.x ft1, a0
+; CHECK-NEXT: fadd.s fa0, ft0, ft1
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
 %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
@@ -533,12 +533,12 @@ define float @extractelt_fsub_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: extractelt_fsub_nxv4f32_splat:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI48_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI48_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 1
-; CHECK-NEXT: vfmv.f.s ft1, v8
-; CHECK-NEXT: fsub.s fa0, ft0, ft1
+; CHECK-NEXT: vfmv.f.s ft0, v8
+; CHECK-NEXT: lui a0, 263168
+; CHECK-NEXT: fmv.w.x ft1, a0
+; CHECK-NEXT: fsub.s fa0, ft1, ft0
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
 %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
@@ -550,12 +550,12 @@ define float @extractelt_fmul_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: extractelt_fmul_nxv4f32_splat:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI49_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI49_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 3
-; CHECK-NEXT: vfmv.f.s ft1, v8
-; CHECK-NEXT: fmul.s fa0, ft1, ft0
+; CHECK-NEXT: vfmv.f.s ft0, v8
+; CHECK-NEXT: lui a0, 263168
+; CHECK-NEXT: fmv.w.x ft1, a0
+; CHECK-NEXT: fmul.s fa0, ft0, ft1
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
 %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
@@ -567,11 +567,11 @@ define float @extractelt_fdiv_nxv4f32_splat(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: extractelt_fdiv_nxv4f32_splat:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI50_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI50_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 0, e32, m2, ta, ma
-; CHECK-NEXT: vfmv.f.s ft1, v8
-; CHECK-NEXT: fdiv.s fa0, ft1, ft0
+; CHECK-NEXT: vfmv.f.s ft0, v8
+; CHECK-NEXT: lui a0, 263168
+; CHECK-NEXT: fmv.w.x ft1, a0
+; CHECK-NEXT: fdiv.s fa0, ft0, ft1
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 4 x float> poison, float 3.0, i32 0
 %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
@@ -127,10 +127,10 @@ define <vscale x 1 x float> @ceil_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-LABEL: ceil_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 3
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -147,10 +147,10 @@ define <vscale x 2 x float> @ceil_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-LABEL: ceil_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 3
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -167,10 +167,10 @@ define <vscale x 4 x float> @ceil_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: ceil_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v10, ft0
 ; CHECK-NEXT: fsrmi a0, 3
 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -187,10 +187,10 @@ define <vscale x 8 x float> @ceil_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-LABEL: ceil_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v12, ft0
 ; CHECK-NEXT: fsrmi a0, 3
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -207,10 +207,10 @@ define <vscale x 16 x float> @ceil_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-LABEL: ceil_nxv16f32:
 ; CHECK: # %bb.0:
-;
CHECK-NEXT: lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v16, ft0
 ; CHECK-NEXT: fsrmi a0, 3
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
@@ -127,10 +127,10 @@ define <vscale x 1 x float> @floor_nxv1f32(<vscale x 1 x float> %x) {
 ; CHECK-LABEL: floor_nxv1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -147,10 +147,10 @@ define <vscale x 2 x float> @floor_nxv2f32(<vscale x 2 x float> %x) {
 ; CHECK-LABEL: floor_nxv2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -167,10 +167,10 @@ define <vscale x 4 x float> @floor_nxv4f32(<vscale x 4 x float> %x) {
 ; CHECK-LABEL: floor_nxv4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v10, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -187,10 +187,10 @@ define <vscale x 8 x float> @floor_nxv8f32(<vscale x 8 x float> %x) {
 ; CHECK-LABEL: floor_nxv8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v12, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -207,10 +207,10 @@ define <vscale x 16 x float> @floor_nxv16f32(<vscale x 16 x float> %x) {
 ; CHECK-LABEL: floor_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v16, ft0
 ; CHECK-NEXT: fsrmi a0, 2
 ; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -205,10 +205,10 @@ define <2 x float> @vp_ceil_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -226,12 +226,12 @@ define <2 x float> @vp_ceil_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v2f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
-;
CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vmset.m v0
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -253,10 +253,10 @@ define <4 x float> @vp_ceil_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -274,12 +274,12 @@ define <4 x float> @vp_ceil_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v4f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vmset.m v0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -302,10 +302,10 @@ define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -324,13 +324,13 @@ define <8 x float> @vp_ceil_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v8f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vmset.m v10
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -354,10 +354,10 @@ define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 3
@@ -376,13 +376,13 @@ define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v16f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vmset.m v12
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0,
3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -802,12 +802,12 @@ define float @extractelt_fadd_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: extractelt_fadd_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI40_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vfmv.f.s ft1, v8
-; CHECK-NEXT: fadd.s fa0, ft1, ft0
+; CHECK-NEXT: vfmv.f.s ft0, v8
+; CHECK-NEXT: lui a0, 267520
+; CHECK-NEXT: fmv.w.x ft1, a0
+; CHECK-NEXT: fadd.s fa0, ft0, ft1
 ; CHECK-NEXT: ret
 %bo = fadd <4 x float> %x,
 %ext = extractelement <4 x float> %bo, i32 2
@@ -817,12 +817,12 @@ define float @extractelt_fsub_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: extractelt_fsub_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI41_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI41_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vfmv.f.s ft1, v8
-; CHECK-NEXT: fsub.s fa0, ft0, ft1
+; CHECK-NEXT: vfmv.f.s ft0, v8
+; CHECK-NEXT: lui a0, 267520
+; CHECK-NEXT: fmv.w.x ft1, a0
+; CHECK-NEXT: fsub.s fa0, ft1, ft0
 ; CHECK-NEXT: ret
 %bo = fsub <4 x float> , %x
 %ext = extractelement <4 x float> %bo, i32 2
@@ -832,12 +832,12 @@ define float @extractelt_fmul_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: extractelt_fmul_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI42_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vfmv.f.s ft1, v8
-; CHECK-NEXT: fmul.s fa0, ft1, ft0
+; CHECK-NEXT: vfmv.f.s ft0, v8
+; CHECK-NEXT: lui a0, 267520
+; CHECK-NEXT: fmv.w.x ft1, a0
+; CHECK-NEXT: fmul.s fa0, ft0, ft1
 ; CHECK-NEXT: ret
 %bo = fmul <4 x float> %x,
 %ext = extractelement <4 x float> %bo, i32 2
@@ -847,12 +847,12 @@ define float @extractelt_fdiv_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: extractelt_fdiv_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI43_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI43_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT: vslidedown.vi v8, v8, 2
-; CHECK-NEXT: vfmv.f.s ft1, v8
-; CHECK-NEXT: fdiv.s fa0, ft1, ft0
+; CHECK-NEXT: vfmv.f.s ft0, v8
+; CHECK-NEXT: lui a0, 267520
+; CHECK-NEXT: fmv.w.x ft1, a0
+; CHECK-NEXT: fdiv.s fa0, ft0, ft1
 ; CHECK-NEXT: ret
 %bo = fdiv <4 x float> %x,
 %ext = extractelement <4 x float> %bo, i32 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -205,10 +205,10 @@ define <2 x float> @vp_floor_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
@@ -226,12 +226,12 @@ define <2 x float> @vp_floor_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v2f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0,
%lo(.LCPI9_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vmset.m v0
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
@@ -253,10 +253,10 @@ define <4 x float> @vp_floor_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
@@ -274,12 +274,12 @@ define <4 x float> @vp_floor_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v4f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vmset.m v0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
@@ -302,10 +302,10 @@ define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
@@ -324,13 +324,13 @@ define <8 x float> @vp_floor_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v8f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vmset.m v10
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
@@ -354,10 +354,10 @@ define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
@@ -376,13 +376,13 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v16f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT: vmset.m v12
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v12
 ; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT: fsrmi a0, 2
diff
--git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -100,14 +100,13 @@
 ; CHECK-LABEL: buildvec_dominant0_v4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI4_0)
-; CHECK-NEXT: vlse32.v v8, (a1), zero
-; CHECK-NEXT: vmv.s.x v9, zero
+; CHECK-NEXT: vmv.s.x v8, zero
+; CHECK-NEXT: lui a1, 262144
+; CHECK-NEXT: vmv.v.x v9, a1
 ; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 2
+; CHECK-NEXT: vslideup.vi v9, v8, 2
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: vse32.v v9, (a0)
 ; CHECK-NEXT: ret
 store <4 x float> , <4 x float>* %x
 ret void
@@ -135,10 +134,9 @@ define void @buildvec_dominant2_v4f32(<4 x float>* %x, float %f) {
 ; CHECK-LABEL: buildvec_dominant2_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT: lui a1, 262144
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v8, ft0
+; CHECK-NEXT: vmv.s.x v8, a1
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
 ; CHECK-NEXT: vslideup.vi v9, v8, 1
@@ -154,31 +152,17 @@
 }
 define void @buildvec_merge0_v4f32(<4 x float>* %x, float %f) {
-; RV32-LABEL: buildvec_merge0_v4f32:
-; RV32: # %bb.0:
-; RV32-NEXT: li a1, 6
-; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; RV32-NEXT: lui a2, %hi(.LCPI7_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI7_0)(a2)
-; RV32-NEXT: vmv.s.x v0, a1
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vfmv.v.f v8, fa0
-; RV32-NEXT: vfmerge.vfm v8, v8, ft0, v0
-; RV32-NEXT: vse32.v v8, (a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: buildvec_merge0_v4f32:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI7_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI7_0)(a1)
-; RV64-NEXT: li a1, 6
-; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-NEXT: vmv.s.x v0, a1
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vfmv.v.f v8, fa0
-; RV64-NEXT: vfmerge.vfm v8, v8, ft0, v0
-; RV64-NEXT: vse32.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: buildvec_merge0_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 6
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: lui a1, 262144
+; CHECK-NEXT: vmerge.vxm v8, v8, a1, v0
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
 %v0 = insertelement <4 x float> poison, float %f, i32 0
 %v1 = insertelement <4 x float> %v0, float 2.0, i32 1
 %v2 = insertelement <4 x float> %v1, float 2.0, i32 2
@@ -267,11 +251,10 @@ define dso_local void @splat_load_licm(float* %0) {
 ; RV32-LABEL: splat_load_licm:
 ; RV32: # %bb.0:
-; RV32-NEXT: lui a1, %hi(.LCPI12_0)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI12_0)
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: vlse32.v v8, (a1), zero
 ; RV32-NEXT: li a1, 1024
+; RV32-NEXT: lui a2, 263168
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a2
 ; RV32-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; RV32-NEXT: vse32.v v8, (a0)
 ; RV32-NEXT: addi a1, a1, -4
@@ -282,11 +265,10 @@
 ;
 ; RV64-LABEL: splat_load_licm:
 ; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI12_0)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI12_0)
-; RV64-NEXT: vsetivli zero, 4, e32, m1,
ta, ma
-; RV64-NEXT: vlse32.v v8, (a1), zero
 ; RV64-NEXT: li a1, 1024
+; RV64-NEXT: lui a2, 263168
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a2
 ; RV64-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; RV64-NEXT: vse32.v v8, (a0)
 ; RV64-NEXT: addiw a1, a1, -4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1994,9 +1994,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI92_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI92_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x ft0, a1
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -2062,9 +2062,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI95_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI95_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x ft0, a1
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a1, 3
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -2134,9 +2134,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI98_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI98_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x ft0, a1
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a1, 2
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -2206,9 +2206,9 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI101_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI101_0)(a1)
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x ft0, a1
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a1, 4
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
@@ -130,10 +130,10 @@ define <1 x float> @round_v1f32(<1 x float> %x) {
 ; CHECK-LABEL: round_v1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 4
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -150,10 +150,10 @@ define <2 x float> @round_v2f32(<2 x float> %x) {
 ; CHECK-LABEL: round_v2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 4
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -170,10 +170,10 @@ define <4 x float> @round_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: round_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT:
vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 4
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -190,10 +190,10 @@ define <8 x float> @round_v8f32(<8 x float> %x) {
 ; CHECK-LABEL: round_v8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v10, ft0
 ; CHECK-NEXT: fsrmi a0, 4
 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -210,10 +210,10 @@ define <16 x float> @round_v16f32(<16 x float> %x) {
 ; CHECK-LABEL: round_v16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v12, ft0
 ; CHECK-NEXT: fsrmi a0, 4
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
@@ -130,10 +130,10 @@ define <1 x float> @roundeven_v1f32(<1 x float> %x) {
 ; CHECK-LABEL: roundeven_v1f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 0
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -150,10 +150,10 @@ define <2 x float> @roundeven_v2f32(<2 x float> %x) {
 ; CHECK-LABEL: roundeven_v2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 0
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -170,10 +170,10 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) {
 ; CHECK-LABEL: roundeven_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0
 ; CHECK-NEXT: fsrmi a0, 0
 ; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -190,10 +190,10 @@ define <8 x float> @roundeven_v8f32(<8 x float> %x) {
 ; CHECK-LABEL: roundeven_v8f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v10, ft0
 ; CHECK-NEXT: fsrmi a0, 0
 ; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -210,10 +210,10 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) {
 ; CHECK-LABEL: roundeven_v16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
 ; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vmflt.vf v0, v12, ft0
 ; CHECK-NEXT: fsrmi a0, 0
 ; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -205,10 +205,10 @@ define <2 x float> @vp_nearbyint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v2f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: frflags a0
@@ -226,12 +226,12 @@ define <2 x float> @vp_nearbyint_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v2f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT: vmset.m v0
 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: frflags a0
@@ -253,10 +253,10 @@ define <4 x float> @vp_nearbyint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v4f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: frflags a0
@@ -274,12 +274,12 @@ define <4 x float> @vp_nearbyint_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v4f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT: vmset.m v0
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT: frflags a0
@@ -302,10 +302,10 @@ define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: frflags a0
@@ -324,13 +324,13 @@ define <8 x float> @vp_nearbyint_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v8f32_unmasked:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vmset.m v10
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vmv1r.v v0, v10
 ; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
 ; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT: frflags a0
@@ -354,10 +354,10 @@ define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui
a1, %hi(.LCPI14_0) -; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1) ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t ; CHECK-NEXT: frflags a0 @@ -376,13 +376,13 @@ define <16 x float> @vp_nearbyint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_nearbyint_v16f32_unmasked: ; CHECK: # %bb.0: -; CHECK-NEXT: lui a1, %hi(.LCPI15_0) -; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1) ; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma ; CHECK-NEXT: vmset.m v12 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma ; CHECK-NEXT: vmv1r.v v0, v12 ; CHECK-NEXT: vfabs.v v16, v8, v0.t +; CHECK-NEXT: lui a0, 307200 +; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t ; CHECK-NEXT: frflags a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll @@ -1269,27 +1269,15 @@ declare float @llvm.vector.reduce.fmin.v2f32(<2 x float>) define float @vreduce_fmin_v2f32(ptr %x) { -; RV32-LABEL: vreduce_fmin_v2f32: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma -; RV32-NEXT: lui a1, %hi(.LCPI73_0) -; RV32-NEXT: flw ft0, %lo(.LCPI73_0)(a1) -; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vfmv.s.f v9, ft0 -; RV32-NEXT: vfredmin.vs v8, v8, v9 -; RV32-NEXT: vfmv.f.s fa0, v8 -; RV32-NEXT: ret -; -; RV64-LABEL: vreduce_fmin_v2f32: -; RV64: # %bb.0: -; RV64-NEXT: lui a1, %hi(.LCPI73_0) -; RV64-NEXT: flw ft0, %lo(.LCPI73_0)(a1) -; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma -; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: vfmv.s.f v9, ft0 -; RV64-NEXT: vfredmin.vs v8, v8, v9 -; RV64-NEXT: vfmv.f.s fa0, v8 -; RV64-NEXT: ret +; CHECK-LABEL: vreduce_fmin_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: lui a0, 523264 +; CHECK-NEXT: vmv.s.x v9, a0 +; CHECK-NEXT: vfredmin.vs v8, v8, v9 +; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: ret %v = load <2 x float>, ptr %x %red = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> %v) ret float %red @@ -1298,54 +1286,30 @@ declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>) define float @vreduce_fmin_v4f32(ptr %x) { -; RV32-LABEL: vreduce_fmin_v4f32: -; RV32: # %bb.0: -; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV32-NEXT: lui a1, %hi(.LCPI74_0) -; RV32-NEXT: flw ft0, %lo(.LCPI74_0)(a1) -; RV32-NEXT: vle32.v v8, (a0) -; RV32-NEXT: vfmv.s.f v9, ft0 -; RV32-NEXT: vfredmin.vs v8, v8, v9 -; RV32-NEXT: vfmv.f.s fa0, v8 -; RV32-NEXT: ret -; -; RV64-LABEL: vreduce_fmin_v4f32: -; RV64: # %bb.0: -; RV64-NEXT: lui a1, %hi(.LCPI74_0) -; RV64-NEXT: flw ft0, %lo(.LCPI74_0)(a1) -; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; RV64-NEXT: vle32.v v8, (a0) -; RV64-NEXT: vfmv.s.f v9, ft0 -; RV64-NEXT: vfredmin.vs v8, v8, v9 -; RV64-NEXT: vfmv.f.s fa0, v8 -; RV64-NEXT: ret +; CHECK-LABEL: vreduce_fmin_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vle32.v v8, (a0) +; CHECK-NEXT: lui a0, 523264 +; CHECK-NEXT: vmv.s.x v9, a0 +; CHECK-NEXT: vfredmin.vs v8, v8, v9 +; CHECK-NEXT: vfmv.f.s fa0, v8 +; CHECK-NEXT: ret %v = load <4 x float>, ptr %x %red = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v) ret 
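Note on the replacement sequence above: the old code loaded the rounding threshold from the constant pool (`lui %hi(.LCPIn_0)` + `flw`); the new code builds the same value in the integer domain. `lui` places imm << 12 in the register, so 307200 << 12 = 0x4B000000, which is exactly the IEEE-754 single encoding of 2^23, and `fmv.w.x` moves those bits unchanged into an FP register. A minimal standalone C++ sketch (not part of the patch) to verify the bit pattern:

    // Decode what `lui a0, 307200` + `fmv.w.x ft0, a0` materializes.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint32_t bits = 307200u << 12; // lui loads imm << 12: 0x4B000000
      float f;
      std::memcpy(&f, &bits, sizeof f); // fmv.w.x: raw bit move, no conversion
      std::printf("0x%08X = %g\n", bits, f); // 0x4B000000 = 8.38861e+06 (2^23)
      return 0;
    }

This is why the .LCPIn_0 labels and their loads disappear from every rounding test in this section: the constant is rebuilt in two register-only instructions instead of a pc-relative load.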
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1269,27 +1269,15 @@
declare float @llvm.vector.reduce.fmin.v2f32(<2 x float>)

define float @vreduce_fmin_v2f32(ptr %x) {
-; RV32-LABEL: vreduce_fmin_v2f32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT: lui a1, %hi(.LCPI73_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI73_0)(a1)
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vfmv.s.f v9, ft0
-; RV32-NEXT: vfredmin.vs v8, v8, v9
-; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vreduce_fmin_v2f32:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI73_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI73_0)(a1)
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: vfmv.s.f v9, ft0
-; RV64-NEXT: vfredmin.vs v8, v8, v9
-; RV64-NEXT: vfmv.f.s fa0, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: vreduce_fmin_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vfredmin.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
%red = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> %v)
ret float %red
@@ -1298,54 +1286,30 @@
declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)

define float @vreduce_fmin_v4f32(ptr %x) {
-; RV32-LABEL: vreduce_fmin_v4f32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: lui a1, %hi(.LCPI74_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI74_0)(a1)
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vfmv.s.f v9, ft0
-; RV32-NEXT: vfredmin.vs v8, v8, v9
-; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vreduce_fmin_v4f32:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI74_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI74_0)(a1)
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: vfmv.s.f v9, ft0
-; RV64-NEXT: vfredmin.vs v8, v8, v9
-; RV64-NEXT: vfmv.f.s fa0, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: vreduce_fmin_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vfredmin.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
%red = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v)
ret float %red
}

define float @vreduce_fmin_v4f32_nonans(ptr %x) {
-; RV32-LABEL: vreduce_fmin_v4f32_nonans:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: lui a1, %hi(.LCPI75_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI75_0)(a1)
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vfmv.s.f v9, ft0
-; RV32-NEXT: vfredmin.vs v8, v8, v9
-; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vreduce_fmin_v4f32_nonans:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI75_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI75_0)(a1)
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: vfmv.s.f v9, ft0
-; RV64-NEXT: vfredmin.vs v8, v8, v9
-; RV64-NEXT: vfmv.f.s fa0, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: vreduce_fmin_v4f32_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: lui a0, 522240
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vfredmin.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
%red = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v)
ret float %red
@@ -1392,12 +1356,11 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vle32.v v0, (a1)
-; CHECK-NEXT: lui a0, %hi(.LCPI77_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI77_0)(a0)
; CHECK-NEXT: vfmin.vv v16, v24, v16
; CHECK-NEXT: vfmin.vv v8, v8, v0
; CHECK-NEXT: vfmin.vv v8, v8, v16
-; CHECK-NEXT: vfmv.s.f v16, ft0
+; CHECK-NEXT: lui a0, 523264
+; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vfredmin.vs v8, v8, v16
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -1556,27 +1519,15 @@
declare half @llvm.vector.reduce.fmax.v2f16(<2 x half>)

define half @vreduce_fmax_v2f16(ptr %x) {
-; RV32-LABEL: vreduce_fmax_v2f16:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; RV32-NEXT: lui a1, %hi(.LCPI83_0)
-; RV32-NEXT: flh ft0, %lo(.LCPI83_0)(a1)
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: vfmv.s.f v9, ft0
-; RV32-NEXT: vfredmax.vs v8, v8, v9
-; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vreduce_fmax_v2f16:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI83_0)
-; RV64-NEXT: flh ft0, %lo(.LCPI83_0)(a1)
-; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: vfmv.s.f v9, ft0
-; RV64-NEXT: vfredmax.vs v8, v8, v9
-; RV64-NEXT: vfmv.f.s fa0, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: vreduce_fmax_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: li a0, -512
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vfredmax.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: ret
%v = load <2 x half>, ptr %x
%red = call half @llvm.vector.reduce.fmax.v2f16(<2 x half> %v)
ret half %red
@@ -1585,81 +1536,45 @@
declare half @llvm.vector.reduce.fmax.v4f16(<4 x half>)

define half @vreduce_fmax_v4f16(ptr %x) {
-; RV32-LABEL: vreduce_fmax_v4f16:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: lui a1, %hi(.LCPI84_0)
-; RV32-NEXT: flh ft0, %lo(.LCPI84_0)(a1)
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: vfmv.s.f v9, ft0
-; RV32-NEXT: vfredmax.vs v8, v8, v9
-; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vreduce_fmax_v4f16:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI84_0)
-; RV64-NEXT: flh ft0, %lo(.LCPI84_0)(a1)
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: vfmv.s.f v9, ft0
-; RV64-NEXT: vfredmax.vs v8, v8, v9
-; RV64-NEXT: vfmv.f.s fa0, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: vreduce_fmax_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: li a0, -512
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vfredmax.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: ret
%v = load <4 x half>, ptr %x
%red = call half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v)
ret half %red
}

define half @vreduce_fmax_v4f16_nonans(ptr %x) {
-; RV32-LABEL: vreduce_fmax_v4f16_nonans:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: lui a1, %hi(.LCPI85_0)
-; RV32-NEXT: flh ft0, %lo(.LCPI85_0)(a1)
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: vfmv.s.f v9, ft0
-; RV32-NEXT: vfredmax.vs v8, v8, v9
-; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vreduce_fmax_v4f16_nonans:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI85_0)
-; RV64-NEXT: flh ft0, %lo(.LCPI85_0)(a1)
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: vfmv.s.f v9, ft0
-; RV64-NEXT: vfredmax.vs v8, v8, v9
-; RV64-NEXT: vfmv.f.s fa0, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: vreduce_fmax_v4f16_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: li a0, -1024
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vfredmax.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: ret
%v = load <4 x half>, ptr %x
%red = call nnan half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v)
ret half %red
}

define half @vreduce_fmax_v4f16_nonans_noinfs(ptr %x) {
-; RV32-LABEL: vreduce_fmax_v4f16_nonans_noinfs:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: lui a1, %hi(.LCPI86_0)
-; RV32-NEXT: flh ft0, %lo(.LCPI86_0)(a1)
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: vfmv.s.f v9, ft0
-; RV32-NEXT: vfredmax.vs v8, v8, v9
-; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vreduce_fmax_v4f16_nonans_noinfs:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI86_0)
-; RV64-NEXT: flh ft0, %lo(.LCPI86_0)(a1)
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: vfmv.s.f v9, ft0
-; RV64-NEXT: vfredmax.vs v8, v8, v9
-; RV64-NEXT: vfmv.f.s fa0, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: vreduce_fmax_v4f16_nonans_noinfs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: li a0, -1025
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vfredmax.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: ret
%v = load <4 x half>, ptr %x
%red = call nnan ninf half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v)
ret half %red
@@ -1674,10 +1589,9 @@
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
-; CHECK-NEXT: lui a1, %hi(.LCPI87_0)
-; CHECK-NEXT: flh ft0, %lo(.LCPI87_0)(a1)
; CHECK-NEXT: vle16.v v16, (a0)
-; CHECK-NEXT: vfmv.s.f v24, ft0
+; CHECK-NEXT: li a0, -512
+; CHECK-NEXT: vmv.s.x v24, a0
; CHECK-NEXT: vfmax.vv v8, v8, v16
; CHECK-NEXT: vfredmax.vs v8, v8, v24
; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -1690,27 +1604,15 @@
declare float @llvm.vector.reduce.fmax.v2f32(<2 x float>)

define float @vreduce_fmax_v2f32(ptr %x) {
-; RV32-LABEL: vreduce_fmax_v2f32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT: lui a1, %hi(.LCPI88_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI88_0)(a1)
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vfmv.s.f v9, ft0
-; RV32-NEXT: vfredmax.vs v8, v8, v9
-; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vreduce_fmax_v2f32:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI88_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI88_0)(a1)
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: vfmv.s.f v9, ft0
-; RV64-NEXT: vfredmax.vs v8, v8, v9
-; RV64-NEXT: vfmv.f.s fa0, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: vreduce_fmax_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: lui a0, 1047552
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vfredmax.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: ret
%v = load <2 x float>, ptr %x
%red = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> %v)
ret float %red
@@ -1719,54 +1621,30 @@
declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>)

define float @vreduce_fmax_v4f32(ptr %x) {
-; RV32-LABEL: vreduce_fmax_v4f32:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: lui a1, %hi(.LCPI89_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI89_0)(a1)
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vfmv.s.f v9, ft0
-; RV32-NEXT: vfredmax.vs v8, v8, v9
-; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vreduce_fmax_v4f32:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI89_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI89_0)(a1)
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: vfmv.s.f v9, ft0
-; RV64-NEXT: vfredmax.vs v8, v8, v9
-; RV64-NEXT: vfmv.f.s fa0, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: vreduce_fmax_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: lui a0, 1047552
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vfredmax.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
%red = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v)
ret float %red
}

define float @vreduce_fmax_v4f32_nonans(ptr %x) {
-; RV32-LABEL: vreduce_fmax_v4f32_nonans:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT: lui a1, %hi(.LCPI90_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI90_0)(a1)
-; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vfmv.s.f v9, ft0
-; RV32-NEXT: vfredmax.vs v8, v8, v9
-; RV32-NEXT: vfmv.f.s fa0, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vreduce_fmax_v4f32_nonans:
-; RV64: # %bb.0:
-; RV64-NEXT: lui a1, %hi(.LCPI90_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI90_0)(a1)
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: vfmv.s.f v9, ft0
-; RV64-NEXT: vfredmax.vs v8, v8, v9
-; RV64-NEXT: vfmv.f.s fa0, v8
-; RV64-NEXT: ret
+; CHECK-LABEL: vreduce_fmax_v4f32_nonans:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: lui a0, 1046528
+; CHECK-NEXT: vmv.s.x v9, a0
+; CHECK-NEXT: vfredmax.vs v8, v8, v9
+; CHECK-NEXT: vfmv.f.s fa0, v8
+; CHECK-NEXT: ret
%v = load <4 x float>, ptr %x
%red = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v)
ret float %red
@@ -1813,12 +1691,11 @@
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vle32.v v0, (a1)
-; CHECK-NEXT: lui a0, %hi(.LCPI92_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI92_0)(a0)
; CHECK-NEXT: vfmax.vv v16, v24, v16
; CHECK-NEXT: vfmax.vv v8, v8, v0
; CHECK-NEXT: vfmax.vv v8, v8, v16
-; CHECK-NEXT: vfmv.s.f v16, ft0
+; CHECK-NEXT: lui a0, 1047552
+; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vfredmax.vs v8, v8, v16
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
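The reduction tests above also swap `vfmv.s.f` (FP scalar into vector) for `vmv.s.x` (integer scalar into vector), so the start element never touches a scalar FP register; that is also why the separate RV32/RV64 check prefixes collapse into a shared CHECK. The `lui`/`li` immediates are the raw IEEE-754 encodings of the start values: a quiet NaN for plain fmin/fmax, and the appropriate infinity or largest-finite value once nnan/ninf apply. A standalone C++ sketch decoding them (helper name is mine, not LLVM's):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static float f32(uint32_t bits) { // mirror of vmv.s.x into an f32 lane
      float f;
      std::memcpy(&f, &bits, sizeof f);
      return f;
    }

    int main() {
      std::printf("%g\n", f32(523264u << 12));  // 0x7FC00000: +qNaN (fmin)
      std::printf("%g\n", f32(522240u << 12));  // 0x7F800000: +inf  (fmin, nnan)
      std::printf("%g\n", f32(1047552u << 12)); // 0xFFC00000: -qNaN (fmax)
      std::printf("%g\n", f32(1046528u << 12)); // 0xFF800000: -inf  (fmax, nnan)
      // f16 cases: li sign-extends, but only SEW=16 bits land in the element:
      // -512 -> 0xFE00 (-qNaN), -1024 -> 0xFC00 (-inf),
      // -1025 -> 0xFBFF (most negative finite half, for nnan+ninf).
      return 0;
    }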
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -189,10 +189,10 @@
define <2 x float> @vp_rint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
@@ -208,12 +208,12 @@
define <2 x float> @vp_rint_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v2f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
@@ -233,10 +233,10 @@
define <4 x float> @vp_rint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
@@ -252,12 +252,12 @@
define <4 x float> @vp_rint_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v4f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
@@ -278,10 +278,10 @@
; CHECK-LABEL: vp_rint_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
@@ -298,13 +298,13 @@
define <8 x float> @vp_rint_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
@@ -326,10 +326,10 @@
; CHECK-LABEL: vp_rint_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
@@ -346,13 +346,13 @@
define <16 x float> @vp_rint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
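All of these rint/round tests share one shape: take |x| with vfabs.v, compare against 2^23 with vmflt.vf to build a mask, and round-trip only the lanes below the threshold through an integer conversion, because any float with |x| >= 2^23 is already an integer. A scalar C++ sketch of the idea (this uses truncation for the cast; the vector code instead picks the mode via frm or the instruction's static rtz encoding):

    #include <cmath>
    #include <cstdio>

    static float rint_like(float x) {
      const float kThreshold = 8388608.0f;   // 2^23, the 0x4B000000 constant
      if (std::fabs(x) < kThreshold)         // the vmflt.vf mask
        return (float)(long long)x;          // vfcvt.x.f.v + vfcvt.f.x.v
      return x;                              // masked-off lanes pass through
    }

    int main() {
      std::printf("%g %g %g\n", rint_like(2.5f), rint_like(-0.75f),
                  rint_like(16777216.0f));   // prints: 2 0 1.67772e+07
      return 0;
    }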
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -205,10 +205,10 @@
define <2 x float> @vp_round_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -226,12 +226,12 @@
define <2 x float> @vp_round_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v2f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -253,10 +253,10 @@
define <4 x float> @vp_round_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -274,12 +274,12 @@
define <4 x float> @vp_round_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v4f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -302,10 +302,10 @@
; CHECK-LABEL: vp_round_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -324,13 +324,13 @@
define <8 x float> @vp_round_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -354,10 +354,10 @@
; CHECK-LABEL: vp_round_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -376,13 +376,13 @@
define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -205,10 +205,10 @@
define <2 x float> @vp_roundeven_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
@@ -226,12 +226,12 @@
define <2 x float> @vp_roundeven_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v2f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
@@ -253,10 +253,10 @@
define <4 x float> @vp_roundeven_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
@@ -274,12 +274,12 @@
define <4 x float> @vp_roundeven_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v4f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
@@ -302,10 +302,10 @@
; CHECK-LABEL: vp_roundeven_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
@@ -324,13 +324,13 @@
define <8 x float> @vp_roundeven_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
@@ -354,10 +354,10 @@
; CHECK-LABEL: vp_roundeven_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
@@ -376,13 +376,13 @@
define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -205,10 +205,10 @@
define <2 x float> @vp_roundtozero_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 1
@@ -226,12 +226,12 @@
define <2 x float> @vp_roundtozero_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v2f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 1
@@ -253,10 +253,10 @@
define <4 x float> @vp_roundtozero_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 1
@@ -274,12 +274,12 @@
define <4 x float> @vp_roundtozero_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v4f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 1
@@ -302,10 +302,10 @@
; CHECK-LABEL: vp_roundtozero_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 1
@@ -324,13 +324,13 @@
define <8 x float> @vp_roundtozero_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v8f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 1
@@ -354,10 +354,10 @@
; CHECK-LABEL: vp_roundtozero_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 1
@@ -376,13 +376,13 @@
define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v16f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 1
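The `fsrmi` immediates in the three files above (and in the floor/ceil tests below) are the standard RISC-V frm encodings: each variant swaps in the mode its libm counterpart needs before the `vfcvt.x.f.v` and restores the old value afterwards. For reference, as a self-contained C++ snippet (the enum is mine; the numeric encodings are from the RISC-V spec):

    // RISC-V dynamic rounding-mode (frm) encodings used by fsrmi above.
    enum RiscvFrm {
      kRNE = 0, // round to nearest, ties to even  -> roundeven
      kRTZ = 1, // round toward zero               -> roundtozero
      kRDN = 2, // round down (-inf)               -> floor
      kRUP = 3, // round up (+inf)                 -> ceil
      kRMM = 4  // round to nearest, ties to max magnitude -> round
    };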
diff --git a/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll
--- a/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll
@@ -13,10 +13,10 @@
define <vscale x 1 x i8> @trunc_nxv1f32_to_si8(<vscale x 1 x float> %x) {
; RV32-LABEL: trunc_nxv1f32_to_si8:
; RV32: # %bb.0:
-; RV32-NEXT: lui a0, %hi(.LCPI0_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI0_0)(a0)
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: vmflt.vf v0, v9, ft0
; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -30,10 +30,10 @@
;
; RV64-LABEL: trunc_nxv1f32_to_si8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI0_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI0_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: vmflt.vf v0, v9, ft0
; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -52,10 +52,10 @@
define <vscale x 1 x i8> @trunc_nxv1f32_to_ui8(<vscale x 1 x float> %x) {
; RV32-LABEL: trunc_nxv1f32_to_ui8:
; RV32: # %bb.0:
-; RV32-NEXT: lui a0, %hi(.LCPI1_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI1_0)(a0)
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: vmflt.vf v0, v9, ft0
; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -69,10 +69,10 @@
;
; RV64-LABEL: trunc_nxv1f32_to_ui8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI1_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI1_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: vmflt.vf v0, v9, ft0
; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -207,10 +207,10 @@
define <vscale x 4 x i8> @trunc_nxv4f32_to_si8(<vscale x 4 x float> %x) {
; RV32-LABEL: trunc_nxv4f32_to_si8:
; RV32: # %bb.0:
-; RV32-NEXT: lui a0, %hi(.LCPI8_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: vmflt.vf v0, v10, ft0
; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
@@ -224,10 +224,10 @@
;
; RV64-LABEL: trunc_nxv4f32_to_si8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI8_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: vmflt.vf v0, v10, ft0
; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
@@ -246,10 +246,10 @@
define <vscale x 4 x i8> @trunc_nxv4f32_to_ui8(<vscale x 4 x float> %x) {
; RV32-LABEL: trunc_nxv4f32_to_ui8:
; RV32: # %bb.0:
-; RV32-NEXT: lui a0, %hi(.LCPI9_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: vmflt.vf v0, v10, ft0
; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
@@ -263,10 +263,10 @@
;
; RV64-LABEL: trunc_nxv4f32_to_ui8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI9_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: vmflt.vf v0, v10, ft0
; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
@@ -401,10 +401,10 @@
define <vscale x 1 x i8> @ceil_nxv1f32_to_si8(<vscale x 1 x float> %x) {
; RV32-LABEL: ceil_nxv1f32_to_si8:
; RV32: # %bb.0:
-; RV32-NEXT: lui a0, %hi(.LCPI16_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI16_0)(a0)
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: vmflt.vf v0, v9, ft0
; RV32-NEXT: fsrmi a0, 3
; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -420,10 +420,10 @@
;
; RV64-LABEL: ceil_nxv1f32_to_si8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI16_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI16_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: vmflt.vf v0, v9, ft0
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -444,10 +444,10 @@
define <vscale x 1 x i8> @ceil_nxv1f32_to_ui8(<vscale x 1 x float> %x) {
; RV32-LABEL: ceil_nxv1f32_to_ui8:
; RV32: # %bb.0:
-; RV32-NEXT: lui a0, %hi(.LCPI17_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI17_0)(a0)
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: vmflt.vf v0, v9, ft0
; RV32-NEXT: fsrmi a0, 3
; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -463,10 +463,10 @@
;
; RV64-LABEL: ceil_nxv1f32_to_ui8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI17_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI17_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: vmflt.vf v0, v9, ft0
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -639,10 +639,10 @@
define <vscale x 4 x i8> @ceil_nxv4f32_to_si8(<vscale x 4 x float> %x) {
; RV32-LABEL: ceil_nxv4f32_to_si8:
; RV32: # %bb.0:
-; RV32-NEXT: lui a0, %hi(.LCPI24_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI24_0)(a0)
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: vmflt.vf v0, v10, ft0
; RV32-NEXT: fsrmi a0, 3
; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -658,10 +658,10 @@
;
; RV64-LABEL: ceil_nxv4f32_to_si8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI24_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI24_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: vmflt.vf v0, v10, ft0
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -682,10 +682,10 @@
define <vscale x 4 x i8> @ceil_nxv4f32_to_ui8(<vscale x 4 x float> %x) {
; RV32-LABEL: ceil_nxv4f32_to_ui8:
; RV32: # %bb.0:
-; RV32-NEXT: lui a0, %hi(.LCPI25_0)
-; RV32-NEXT: flw ft0, %lo(.LCPI25_0)(a0)
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x ft0, a0
; RV32-NEXT: vmflt.vf v0, v10, ft0
; RV32-NEXT: fsrmi a0, 3
; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -701,10 +701,10 @@
;
; RV64-LABEL: ceil_nxv4f32_to_ui8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI25_0)
-; RV64-NEXT: flw ft0, %lo(.LCPI25_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x ft0, a0
; RV64-NEXT: vmflt.vf v0, v10, ft0
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -279,10 +279,10 @@
define <vscale x 1 x float> @vp_floor_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 2
@@ -300,10 +300,10 @@
define <vscale x 1 x float> @vp_floor_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv1f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -323,10 +323,10 @@
define <vscale x 2 x float> @vp_floor_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 2
@@ -344,10 +344,10 @@
define <vscale x 2 x float> @vp_floor_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv2f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -368,10 +368,10 @@
; CHECK-LABEL: vp_floor_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI16_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 2
@@ -390,10 +390,10 @@
define <vscale x 4 x float> @vp_floor_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI17_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -414,10 +414,10 @@
; CHECK-LABEL: vp_floor_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI18_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 2
@@ -436,10 +436,10 @@
define <vscale x 8 x float> @vp_floor_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI19_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -460,10 +460,10 @@
; CHECK-LABEL: vp_floor_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI20_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 2
@@ -482,10 +482,10 @@
define <vscale x 16 x float> @vp_floor_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv16f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI21_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
@@ -129,10 +129,10 @@
define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) {
; CHECK-LABEL: round_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -149,10 +149,10 @@
define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) {
; CHECK-LABEL: round_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -169,10 +169,10 @@
define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) {
; CHECK-LABEL: round_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -189,10 +189,10 @@
define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) {
; CHECK-LABEL: round_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -209,10 +209,10 @@
define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) {
; CHECK-LABEL: round_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
@@ -129,10 +129,10 @@
define <vscale x 1 x float> @roundeven_nxv1f32(<vscale x 1 x float> %x) {
; CHECK-LABEL: roundeven_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -149,10 +149,10 @@
define <vscale x 2 x float> @roundeven_nxv2f32(<vscale x 2 x float> %x) {
; CHECK-LABEL: roundeven_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -169,10 +169,10 @@
define <vscale x 4 x float> @roundeven_nxv4f32(<vscale x 4 x float> %x) {
; CHECK-LABEL: roundeven_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -189,10 +189,10 @@
define <vscale x 8 x float> @roundeven_nxv8f32(<vscale x 8 x float> %x) {
; CHECK-LABEL: roundeven_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -209,10 +209,10 @@
define <vscale x 16 x float> @roundeven_nxv16f32(<vscale x 16 x float> %x) {
; CHECK-LABEL: roundeven_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
@@ -115,10 +115,10 @@
define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) {
; CHECK-LABEL: trunc_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -133,10 +133,10 @@
define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) {
; CHECK-LABEL: trunc_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -151,10 +151,10 @@
define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) {
; CHECK-LABEL: trunc_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
@@ -169,10 +169,10 @@
define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) {
; CHECK-LABEL: trunc_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -187,10 +187,10 @@
define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) {
; CHECK-LABEL: trunc_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
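One contrast worth noting between the two files above and the nearbyint tests that follow (standard RISC-V/IEEE behavior, not something this patch changes): ftrunc does no mode or flag bookkeeping because vfcvt.rtz.x.f.v encodes round-toward-zero statically, while nearbyint brackets its conversion with frflags/fsflags so the inexact flag the conversion would raise is discarded (rint, by contrast, is allowed to raise it). A hedged scalar analogy in C++ (strictly this needs FENV_ACCESS, and the rint result depends on the environment):

    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    int main() {
      std::feclearexcept(FE_ALL_EXCEPT);
      (void)std::nearbyint(2.5);                       // must not raise inexact
      std::printf("inexact after nearbyint: %d\n",
                  std::fetestexcept(FE_INEXACT) != 0); // 0
      (void)std::rint(2.5);                            // may raise inexact
      std::printf("inexact after rint: %d\n",
                  std::fetestexcept(FE_INEXACT) != 0); // typically 1
      return 0;
    }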
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -279,10 +279,10 @@
define <vscale x 1 x float> @vp_nearbyint_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: frflags a0
@@ -300,10 +300,10 @@
define <vscale x 1 x float> @vp_nearbyint_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv1f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -323,10 +323,10 @@
define <vscale x 2 x float> @vp_nearbyint_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: frflags a0
@@ -344,10 +344,10 @@
define <vscale x 2 x float> @vp_nearbyint_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv2f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -368,10 +368,10 @@
; CHECK-LABEL: vp_nearbyint_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI16_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: frflags a0
@@ -390,10 +390,10 @@
define <vscale x 4 x float> @vp_nearbyint_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI17_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -414,10 +414,10 @@
; CHECK-LABEL: vp_nearbyint_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI18_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: frflags a0
@@ -436,10 +436,10 @@
define <vscale x 8 x float> @vp_nearbyint_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI19_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -460,10 +460,10 @@
; CHECK-LABEL: vp_nearbyint_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI20_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: frflags a0
@@ -482,10 +482,10 @@
define <vscale x 16 x float> @vp_nearbyint_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI21_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -255,10 +255,10 @@
define <vscale x 1 x float> @vp_rint_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
@@ -274,10 +274,10 @@
define <vscale x 1 x float> @vp_rint_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv1f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -295,10 +295,10 @@
define <vscale x 2 x float> @vp_rint_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
@@ -314,10 +314,10 @@
define <vscale x 2 x float> @vp_rint_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv2f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -336,10 +336,10 @@
; CHECK-LABEL: vp_rint_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI16_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
@@ -356,10 +356,10 @@
define <vscale x 4 x float> @vp_rint_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI17_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
@@ -378,10 +378,10 @@
; CHECK-LABEL: vp_rint_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI18_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
@@ -398,10 +398,10 @@
define <vscale x 8 x float> @vp_rint_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI19_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -420,10 +420,10 @@
; CHECK-LABEL: vp_rint_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI20_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, ft0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
@@ -440,10 +440,10 @@
define <vscale x 16 x float> @vp_rint_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv16f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI21_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -279,10 +279,10 @@
define <vscale x 1 x float> @vp_round_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI12_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -300,10 +300,10 @@
define <vscale x 1 x float> @vp_round_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv1f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -323,10 +323,10 @@
define <vscale x 2 x float> @vp_round_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI14_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -344,10 +344,10 @@
define <vscale x 2 x float> @vp_round_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv2f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -368,10 +368,10 @@
; CHECK-LABEL: vp_round_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI16_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: lui a0, 307200
+; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, ft0, v0.t
; CHECK-NEXT: fsrmi a0, 4
@@ -390,10 +390,10 @@
define <vscale x 4 x float> @vp_round_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4f32_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: flw ft0, %lo(.LCPI17_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT:
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
@@ -414,10 +414,10 @@
 ; CHECK-LABEL: vp_round_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
@@ -436,10 +436,10 @@
 define <vscale x 8 x float> @vp_round_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -460,10 +460,10 @@
 ; CHECK-LABEL: vp_round_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 4
@@ -482,10 +482,10 @@
 define <vscale x 16 x float> @vp_round_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -279,10 +279,10 @@
 define <vscale x 1 x float> @vp_roundeven_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI12_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
@@ -300,10 +300,10 @@
 define <vscale x 1 x float> @vp_roundeven_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv1f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI13_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -323,10 +323,10 @@
 define <vscale x 2 x float> @vp_roundeven_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI14_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
@@ -344,10 +344,10 @@
 define <vscale x 2 x float> @vp_roundeven_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI15_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -368,10 +368,10 @@
 ; CHECK-LABEL: vp_roundeven_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI16_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
@@ -390,10 +390,10 @@
 define <vscale x 4 x float> @vp_roundeven_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
@@ -414,10 +414,10 @@
 ; CHECK-LABEL: vp_roundeven_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
@@ -436,10 +436,10 @@
 define <vscale x 8 x float> @vp_roundeven_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -460,10 +460,10 @@
 ; CHECK-LABEL: vp_roundeven_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 0
@@ -482,10 +482,10 @@
 define <vscale x 16 x float> @vp_roundeven_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -279,10 +279,10 @@
 define <vscale x 1 x float> @vp_roundtozero_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI12_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI12_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
@@ -300,10 +300,10 @@
 define <vscale x 1 x float> @vp_roundtozero_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv1f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI13_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI13_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -323,10 +323,10 @@
 define <vscale x 2 x float> @vp_roundtozero_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI14_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI14_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
@@ -344,10 +344,10 @@
 define <vscale x 2 x float> @vp_roundtozero_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI15_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI15_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v9, ft0
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -368,10 +368,10 @@
 ; CHECK-LABEL: vp_roundtozero_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI16_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vmflt.vf v10, v12, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
@@ -390,10 +390,10 @@
 define <vscale x 4 x float> @vp_roundtozero_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI17_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v10, ft0
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
@@ -414,10 +414,10 @@
 ; CHECK-LABEL: vp_roundtozero_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI18_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI18_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmflt.vf v12, v16, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
@@ -436,10 +436,10 @@
 define <vscale x 8 x float> @vp_roundtozero_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI19_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v12, ft0
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -460,10 +460,10 @@
 ; CHECK-LABEL: vp_roundtozero_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI20_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI20_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v8, v0.t
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vmflt.vf v16, v24, ft0, v0.t
 ; CHECK-NEXT:    fsrmi a0, 1
@@ -482,10 +482,10 @@
 define <vscale x 16 x float> @vp_roundtozero_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI21_0)(a1)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
+; CHECK-NEXT:    fmv.w.x ft0, a0
 ; CHECK-NEXT:    vmflt.vf v0, v16, ft0
 ; CHECK-NEXT:    fsrmi a0, 1
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -516,10 +516,9 @@
 define float @vreduce_fmin_nxv1f32(<vscale x 1 x float> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI36_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    lui a0, 523264
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -530,10 +529,9 @@
 define float @vreduce_fmin_nxv1f32_nonans(<vscale x 1 x float> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv1f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI37_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI37_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    lui a0, 522240
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -560,10 +558,9 @@
 define float @vreduce_fmin_nxv2f32(<vscale x 2 x float> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI39_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI39_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    lui a0, 523264
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -576,10 +573,9 @@
 define float @vreduce_fmin_nxv4f32(<vscale x 4 x float> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI40_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v10, ft0
+; CHECK-NEXT:    lui a0, 523264
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vfredmin.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -592,12 +588,11 @@
 define float @vreduce_fmin_nxv32f32(<vscale x 32 x float> %v) {
 ; CHECK-LABEL: vreduce_fmin_nxv32f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI41_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI41_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    lui a0, 523264
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vmv.s.x v24, a0
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
-; CHECK-NEXT:    vfmv.s.f v16, ft0
-; CHECK-NEXT:    vfredmin.vs v8, v8, v16
+; CHECK-NEXT:    vfredmin.vs v8, v8, v24
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %red = call float @llvm.vector.reduce.fmin.nxv32f32(<vscale x 32 x float> %v)
@@ -702,10 +697,9 @@
 define half @vreduce_fmax_nxv1f16(<vscale x 1 x half> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI48_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI48_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    li a0, -512
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -716,10 +710,9 @@
 define half @vreduce_fmax_nxv1f16_nonans(<vscale x 1 x half> %v) #0 {
 ; CHECK-LABEL: vreduce_fmax_nxv1f16_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI49_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI49_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    li a0, -1024
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -730,10 +723,9 @@
 define half @vreduce_fmax_nxv1f16_nonans_noinfs(<vscale x 1 x half> %v) #1 {
 ; CHECK-LABEL: vreduce_fmax_nxv1f16_nonans_noinfs:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI50_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI50_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    li a0, -1025
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -746,10 +738,9 @@
 define half @vreduce_fmax_nxv2f16(<vscale x 2 x half> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI51_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI51_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    li a0, -512
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -762,10 +753,9 @@
 define half @vreduce_fmax_nxv4f16(<vscale x 4 x half> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI52_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI52_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    li a0, -512
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -778,12 +768,11 @@
 define half @vreduce_fmax_nxv64f16(<vscale x 64 x half> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv64f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI53_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI53_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    li a0, -512
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vmv.s.x v24, a0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    vfmv.s.f v16, ft0
-; CHECK-NEXT:    vfredmax.vs v8, v8, v16
+; CHECK-NEXT:    vfredmax.vs v8, v8, v24
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %red = call half @llvm.vector.reduce.fmax.nxv64f16(<vscale x 64 x half> %v)
@@ -795,10 +784,9 @@
 define float @vreduce_fmax_nxv1f32(<vscale x 1 x float> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI54_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI54_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    lui a0, 1047552
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -809,10 +797,9 @@
 define float @vreduce_fmax_nxv1f32_nonans(<vscale x 1 x float> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv1f32_nonans:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI55_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI55_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    lui a0, 1046528
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -839,10 +826,9 @@
 define float @vreduce_fmax_nxv2f32(<vscale x 2 x float> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI57_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI57_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, ft0
+; CHECK-NEXT:    lui a0, 1047552
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v9
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -855,10 +841,9 @@
 define float @vreduce_fmax_nxv4f32(<vscale x 4 x float> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI58_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI58_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v10, ft0
+; CHECK-NEXT:    lui a0, 1047552
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmv.s.x v10, a0
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v10
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -871,12 +856,11 @@
 define float @vreduce_fmax_nxv32f32(<vscale x 32 x float> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv32f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI59_0)
-; CHECK-NEXT:    flw ft0, %lo(.LCPI59_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    lui a0, 1047552
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vmv.s.x v24, a0
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
-; CHECK-NEXT:    vfmv.s.f v16, ft0
-; CHECK-NEXT:    vfredmax.vs v8, v8, v16
+; CHECK-NEXT:    vfredmax.vs v8, v8, v24
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %red = call float @llvm.vector.reduce.fmax.nxv32f32(<vscale x 32 x float> %v)
@@ -999,10 +983,9 @@
 ; CHECK-NEXT:    slli a1, a0, 1
 ; CHECK-NEXT:    add a1, a1, a0
 ; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fneg.h ft0, ft0
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a2, 1048568
+; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a2
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -1019,13 +1002,12 @@
 define half @vreduce_ord_fadd_nxv6f16(<vscale x 6 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv6f16:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 1048568
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fneg.h ft0, ft0
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.v.f v10, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vx v9, v10, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1042,13 +1024,12 @@
 define half @vreduce_ord_fadd_nxv10f16(<vscale x 10 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_ord_fadd_nxv10f16:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 1048568
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fneg.h ft0, ft0
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.v.f v12, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vx v10, v12, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
@@ -1071,9 +1052,8 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfmv.s.f v12, fa0
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fneg.h ft0, ft0
-; CHECK-NEXT:    vfmv.v.f v11, ft0
+; CHECK-NEXT:    lui a0, 1048568
+; CHECK-NEXT:    vmv.v.x v11, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredosum.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1091,10 +1071,9 @@
 ; CHECK-NEXT:    slli a1, a0, 1
 ; CHECK-NEXT:    add a1, a1, a0
 ; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fneg.h ft0, ft0
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.v.f v9, ft0
+; CHECK-NEXT:    lui a2, 1048568
+; CHECK-NEXT:    vsetvli a3, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a2
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a1
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -1109,13 +1088,12 @@
 define half @vreduce_fadd_nxv6f16(<vscale x 6 x half> %v, half %s) {
 ; CHECK-LABEL: vreduce_fadd_nxv6f16:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    lui a0, 1048568
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a0
-; CHECK-NEXT:    fmv.h.x ft0, zero
-; CHECK-NEXT:    fneg.h ft0, ft0
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.v.f v10, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vx v9, v10, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1159,11 +1137,10 @@
 define half @vreduce_fmax_nxv12f16(<vscale x 12 x half> %v) {
 ; CHECK-LABEL: vreduce_fmax_nxv12f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI74_0)
-; CHECK-NEXT:    flh ft0, %lo(.LCPI74_0)(a0)
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v12, ft0
-; CHECK-NEXT:    vfmv.v.f v11, ft0
+; CHECK-NEXT:    li a0, -512
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v12, a0
+; CHECK-NEXT:    vmv.v.x v11, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfredmax.vs v8, v8, v12
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -137,13 +137,11 @@
 ; CHECK-NEXT:    vse64.v v10, (a1)
 ; CHECK-NEXT:    j .LBB3_3
 ; CHECK-NEXT:  .LBB3_2: # %if.else
-; CHECK-NEXT:    lui a1, %hi(.LCPI3_2)
-; CHECK-NEXT:    addi a1, a1, %lo(.LCPI3_2)
+; CHECK-NEXT:    lui a1, 260096
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vlse32.v v10, (a1), zero
-; CHECK-NEXT:    lui a1, %hi(.LCPI3_3)
-; CHECK-NEXT:    addi a1, a1, %lo(.LCPI3_3)
-; CHECK-NEXT:    vlse32.v v11, (a1), zero
+; CHECK-NEXT:    vmv.v.x v10, a1
+; CHECK-NEXT:    lui a1, 262144
+; CHECK-NEXT:    vmv.v.x v11, a1
 ; CHECK-NEXT:    vfadd.vv v10, v10, v11
 ; CHECK-NEXT:    lui a1, %hi(scratch)
 ; CHECK-NEXT:    addi a1, a1, %lo(scratch)
@@ -263,12 +261,10 @@
 ; CHECK-NEXT:    bnez a1, .LBB5_2
 ; CHECK-NEXT:  .LBB5_4: # %if.else5
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    lui a0, %hi(.LCPI5_2)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI5_2)
-; CHECK-NEXT:    vlse32.v v9, (a0), zero
-; CHECK-NEXT:    lui a0, %hi(.LCPI5_3)
-; CHECK-NEXT:    addi a0, a0, %lo(.LCPI5_3)
-; CHECK-NEXT:    vlse32.v v10, (a0), zero
+; CHECK-NEXT:    lui a0, 260096
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    lui a0, 262144
+; CHECK-NEXT:    vmv.v.x v10, a0
 ; CHECK-NEXT:    vfadd.vv v9, v9, v10
 ; CHECK-NEXT:    lui a0, %hi(scratch)
 ; CHECK-NEXT:    addi a0, a0, %lo(scratch)
diff --git a/llvm/test/CodeGen/RISCV/select-const.ll b/llvm/test/CodeGen/RISCV/select-const.ll
--- a/llvm/test/CodeGen/RISCV/select-const.ll
+++ b/llvm/test/CodeGen/RISCV/select-const.ll
@@ -97,13 +97,12 @@
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    bnez a0, .LBB4_2
 ; RV32IF-NEXT:  # %bb.1:
-; RV32IF-NEXT:    lui a0, %hi(.LCPI4_0)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI4_0)(a0)
-; RV32IF-NEXT:    fmv.x.w a0, ft0
-; RV32IF-NEXT:    ret
+; RV32IF-NEXT:    lui a0, 264192
+; RV32IF-NEXT:    j .LBB4_3
 ; RV32IF-NEXT:  .LBB4_2:
-; RV32IF-NEXT:    lui a0, %hi(.LCPI4_1)
-; RV32IF-NEXT:    flw ft0, %lo(.LCPI4_1)(a0)
+; RV32IF-NEXT:    lui a0, 263168
+; RV32IF-NEXT:  .LBB4_3:
+; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.x.w a0, ft0
 ; RV32IF-NEXT:    ret
 ;
@@ -121,13 +120,12 @@
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    bnez a0, .LBB4_2
 ; RV64IFD-NEXT:  # %bb.1:
-; RV64IFD-NEXT:    lui a0, %hi(.LCPI4_0)
-; RV64IFD-NEXT:    flw ft0, %lo(.LCPI4_0)(a0)
-; RV64IFD-NEXT:    fmv.x.w a0, ft0
-; RV64IFD-NEXT:    ret
+; RV64IFD-NEXT:    lui a0, 264192
+; RV64IFD-NEXT:    j .LBB4_3
 ; RV64IFD-NEXT:  .LBB4_2:
-; RV64IFD-NEXT:    lui a0, %hi(.LCPI4_1)
-; RV64IFD-NEXT:    flw ft0, %lo(.LCPI4_1)(a0)
+; RV64IFD-NEXT:    lui a0, 263168
+; RV64IFD-NEXT:  .LBB4_3:
+; RV64IFD-NEXT:    fmv.w.x ft0, a0
 ; RV64IFD-NEXT:    fmv.x.w a0, ft0
 ; RV64IFD-NEXT:    ret
   %1 = select i1 %a, float 3.0, float 4.0
diff --git a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
--- a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
+++ b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
@@ -348,36 +348,36 @@
 define float @CascadedSelect(float noundef %a) {
 ; RV32I-LABEL: CascadedSelect:
 ; RV32I:       # %bb.0: # %entry
-; RV32I-NEXT:    lui a1, %hi(.LCPI8_0)
-; RV32I-NEXT:    flw ft0, %lo(.LCPI8_0)(a1)
+; RV32I-NEXT:    fmv.w.x ft0, a0
+; RV32I-NEXT:    lui a0, 260096
 ; RV32I-NEXT:    fmv.w.x ft1, a0
-; RV32I-NEXT:    flt.s a0, ft0, ft1
+; RV32I-NEXT:    flt.s a0, ft1, ft0
 ; RV32I-NEXT:    bnez a0, .LBB8_3
 ; RV32I-NEXT:  # %bb.1: # %entry
-; RV32I-NEXT:    fmv.w.x ft0, zero
-; RV32I-NEXT:    flt.s a0, ft1, ft0
+; RV32I-NEXT:    fmv.w.x ft1, zero
+; RV32I-NEXT:    flt.s a0, ft0, ft1
 ; RV32I-NEXT:    bnez a0, .LBB8_3
 ; RV32I-NEXT:  # %bb.2: # %entry
-; RV32I-NEXT:    fmv.s ft0, ft1
+; RV32I-NEXT:    fmv.s ft1, ft0
 ; RV32I-NEXT:  .LBB8_3: # %entry
-; RV32I-NEXT:    fmv.x.w a0, ft0
+; RV32I-NEXT:    fmv.x.w a0, ft1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: CascadedSelect:
 ; RV64I:       # %bb.0: # %entry
-; RV64I-NEXT:    lui a1, %hi(.LCPI8_0)
-; RV64I-NEXT:    flw ft0, %lo(.LCPI8_0)(a1)
+; RV64I-NEXT:    fmv.w.x ft0, a0
+; RV64I-NEXT:    lui a0, 260096
 ; RV64I-NEXT:    fmv.w.x ft1, a0
-; RV64I-NEXT:    flt.s a0, ft0, ft1
+; RV64I-NEXT:    flt.s a0, ft1, ft0
 ; RV64I-NEXT:    bnez a0, .LBB8_3
 ; RV64I-NEXT:  # %bb.1: # %entry
-; RV64I-NEXT:    fmv.w.x ft0, zero
-; RV64I-NEXT:    flt.s a0, ft1, ft0
+; RV64I-NEXT:    fmv.w.x ft1, zero
+; RV64I-NEXT:    flt.s a0, ft0, ft1
 ; RV64I-NEXT:    bnez a0, .LBB8_3
 ; RV64I-NEXT:  # %bb.2: # %entry
-; RV64I-NEXT:    fmv.s ft0, ft1
+; RV64I-NEXT:    fmv.s ft1, ft0
 ; RV64I-NEXT:  .LBB8_3: # %entry
-; RV64I-NEXT:    fmv.x.w a0, ft0
+; RV64I-NEXT:    fmv.x.w a0, ft1
 ; RV64I-NEXT:    ret
 entry:
   %cmp = fcmp ogt float %a, 1.000000e+00
diff --git a/llvm/test/CodeGen/RISCV/zfh-imm.ll b/llvm/test/CodeGen/RISCV/zfh-imm.ll
--- a/llvm/test/CodeGen/RISCV/zfh-imm.ll
+++ b/llvm/test/CodeGen/RISCV/zfh-imm.ll
@@ -34,26 +34,26 @@
 define half @f16_negative_zero(ptr %pf) nounwind {
 ; RV32IZFH-LABEL: f16_negative_zero:
 ; RV32IZFH:       # %bb.0:
-; RV32IZFH-NEXT:    fmv.h.x ft0, zero
-; RV32IZFH-NEXT:    fneg.h fa0, ft0
+; RV32IZFH-NEXT:    lui a0, 1048568
+; RV32IZFH-NEXT:    fmv.h.x fa0, a0
 ; RV32IZFH-NEXT:    ret
 ;
 ; RV32IDZFH-LABEL: f16_negative_zero:
 ; RV32IDZFH:       # %bb.0:
-; RV32IDZFH-NEXT:    fmv.h.x ft0, zero
-; RV32IDZFH-NEXT:    fneg.h fa0, ft0
+; RV32IDZFH-NEXT:    lui a0, 1048568
+; RV32IDZFH-NEXT:    fmv.h.x fa0, a0
 ; RV32IDZFH-NEXT:    ret
 ;
 ; RV64IZFH-LABEL: f16_negative_zero:
 ; RV64IZFH:       # %bb.0:
-; RV64IZFH-NEXT:    fmv.h.x ft0, zero
-; RV64IZFH-NEXT:    fneg.h fa0, ft0
+; RV64IZFH-NEXT:    lui a0, 1048568
+; RV64IZFH-NEXT:    fmv.h.x fa0, a0
 ; RV64IZFH-NEXT:    ret
 ;
 ; RV64IDZFH-LABEL: f16_negative_zero:
 ; RV64IDZFH:       # %bb.0:
-; RV64IDZFH-NEXT:    fmv.h.x ft0, zero
-; RV64IDZFH-NEXT:    fneg.h fa0, ft0
+; RV64IDZFH-NEXT:    lui a0, 1048568
+; RV64IDZFH-NEXT:    fmv.h.x fa0, a0
 ; RV64IDZFH-NEXT:    ret
   ret half -0.0
 }
diff --git a/llvm/test/CodeGen/RISCV/zfhmin-imm.ll b/llvm/test/CodeGen/RISCV/zfhmin-imm.ll
--- a/llvm/test/CodeGen/RISCV/zfhmin-imm.ll
+++ b/llvm/test/CodeGen/RISCV/zfhmin-imm.ll
@@ -34,30 +34,26 @@
 define half @f16_negative_zero(ptr %pf) nounwind {
 ; RV32IZFHMIN-LABEL: f16_negative_zero:
 ; RV32IZFHMIN:       # %bb.0:
-; RV32IZFHMIN-NEXT:    fmv.w.x ft0, zero
-; RV32IZFHMIN-NEXT:    fneg.s ft0, ft0
-; RV32IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IZFHMIN-NEXT:    lui a0, 1048568
+; RV32IZFHMIN-NEXT:    fmv.h.x fa0, a0
 ; RV32IZFHMIN-NEXT:    ret
 ;
 ; RV32IDZFHMIN-LABEL: f16_negative_zero:
 ; RV32IDZFHMIN:       # %bb.0:
-; RV32IDZFHMIN-NEXT:    fmv.w.x ft0, zero
-; RV32IDZFHMIN-NEXT:    fneg.s ft0, ft0
-; RV32IDZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV32IDZFHMIN-NEXT:    lui a0, 1048568
+; RV32IDZFHMIN-NEXT:    fmv.h.x fa0, a0
 ; RV32IDZFHMIN-NEXT:    ret
 ;
 ; RV64IZFHMIN-LABEL: f16_negative_zero:
 ; RV64IZFHMIN:       # %bb.0:
-; RV64IZFHMIN-NEXT:    fmv.w.x ft0, zero
-; RV64IZFHMIN-NEXT:    fneg.s ft0, ft0
-; RV64IZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IZFHMIN-NEXT:    lui a0, 1048568
+; RV64IZFHMIN-NEXT:    fmv.h.x fa0, a0
 ; RV64IZFHMIN-NEXT:    ret
 ;
 ; RV64IDZFHMIN-LABEL: f16_negative_zero:
 ; RV64IDZFHMIN:       # %bb.0:
-; RV64IDZFHMIN-NEXT:    fmv.w.x ft0, zero
-; RV64IDZFHMIN-NEXT:    fneg.s ft0, ft0
-; RV64IDZFHMIN-NEXT:    fcvt.h.s fa0, ft0
+; RV64IDZFHMIN-NEXT:    lui a0, 1048568
+; RV64IDZFHMIN-NEXT:    fmv.h.x fa0, a0
 ; RV64IDZFHMIN-NEXT:    ret
   ret half -0.0
 }