diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst --- a/llvm/docs/RISCVUsage.rst +++ b/llvm/docs/RISCVUsage.rst @@ -100,7 +100,7 @@ ``Zbkc`` Supported ``Zbkx`` Supported (`See note <#riscv-scalar-crypto-note1>`__) ``Zbs`` Supported - ``Zdinx`` Assembly Support for RV32. Full support for RV64. + ``Zdinx`` Supported ``Zfh`` Supported ``Zfhmin`` Supported ``Zfinx`` Supported diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp --- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp +++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp @@ -50,6 +50,10 @@ bool expandVSetVL(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI); bool expandVMSET_VMCLR(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned Opcode); + bool expandRV32ZdinxStore(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI); + bool expandRV32ZdinxLoad(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI); #ifndef NDEBUG unsigned getInstSizeInBytes(const MachineFunction &MF) const { unsigned Size = 0; @@ -101,6 +105,10 @@ // expanded instructions for each pseudo is correct in the Size field of the // tablegen definition for the pseudo. switch (MBBI->getOpcode()) { + case RISCV::PseudoRV32ZdinxSD: + return expandRV32ZdinxStore(MBB, MBBI); + case RISCV::PseudoRV32ZdinxLD: + return expandRV32ZdinxLoad(MBB, MBBI); case RISCV::PseudoCCMOVGPR: case RISCV::PseudoCCADD: case RISCV::PseudoCCSUB: @@ -251,6 +259,89 @@ return true; } +// This function expands the PseudoRV32ZdinxSD for storing a double-precision +// floating-point value into memory by generating an equivalent instruction +// sequence for RV32. +bool RISCVExpandPseudo::expandRV32ZdinxStore(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI) { + MachineFunction *MF = MBB.getParent(); + DebugLoc DL = MBBI->getDebugLoc(); + const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); + Register Lo = TRI->getSubReg(MBBI->getOperand(0).getReg(), RISCV::sub_32); + Register Hi = TRI->getSubReg(MBBI->getOperand(0).getReg(), RISCV::sub_32_hi); + BuildMI(MBB, MBBI, DL, TII->get(RISCV::SW)) + .addReg(Lo, getKillRegState(MBBI->getOperand(0).isKill())) + .addReg(MBBI->getOperand(1).getReg()) + .add(MBBI->getOperand(2)); + if (MBBI->getOperand(2).isGlobal() || MBBI->getOperand(2).isCPI()) { + // FIXME: Zdinx RV32 can not work on unaligned scalar memory. + const auto &STI = MF->getSubtarget(); + assert(!STI.enableUnalignedScalarMem()); + + assert(MBBI->getOperand(2).getOffset() % 8 == 0); + MBBI->getOperand(2).setOffset(MBBI->getOperand(2).getOffset() + 4); + BuildMI(MBB, MBBI, DL, TII->get(RISCV::SW)) + .addReg(Hi, getKillRegState(MBBI->getOperand(0).isKill())) + .add(MBBI->getOperand(1)) + .add(MBBI->getOperand(2)); + } else { + assert(isInt<12>(MBBI->getOperand(2).getImm() + 4)); + BuildMI(MBB, MBBI, DL, TII->get(RISCV::SW)) + .addReg(Hi, getKillRegState(MBBI->getOperand(0).isKill())) + .add(MBBI->getOperand(1)) + .addImm(MBBI->getOperand(2).getImm() + 4); + } + MBBI->eraseFromParent(); + return true; +} + +// This function expands PseudoRV32ZdinxLoad for loading a double-precision +// floating-point value from memory into an equivalent instruction sequence for +// RV32. 
+bool RISCVExpandPseudo::expandRV32ZdinxLoad(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI) { + MachineFunction *MF = MBB.getParent(); + DebugLoc DL = MBBI->getDebugLoc(); + const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); + Register Lo = TRI->getSubReg(MBBI->getOperand(0).getReg(), RISCV::sub_32); + Register Hi = TRI->getSubReg(MBBI->getOperand(0).getReg(), RISCV::sub_32_hi); + + // If the register of operand 1 is equal to the Lo register, then swap the + // order of loading the Lo and Hi statements. + bool IsOp1EqualToLo = Lo == MBBI->getOperand(1).getReg(); + // Order: Lo, Hi + if (!IsOp1EqualToLo) { + BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), Lo) + .addReg(MBBI->getOperand(1).getReg()) + .add(MBBI->getOperand(2)); + } + + if (MBBI->getOperand(2).isGlobal() || MBBI->getOperand(2).isCPI()) { + auto Offset = MBBI->getOperand(2).getOffset(); + assert(MBBI->getOperand(2).getOffset() % 8 == 0); + MBBI->getOperand(2).setOffset(Offset + 4); + BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), Hi) + .addReg(MBBI->getOperand(1).getReg()) + .add(MBBI->getOperand(2)); + MBBI->getOperand(2).setOffset(Offset); + } else { + assert(isInt<12>(MBBI->getOperand(2).getImm() + 4)); + BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), Hi) + .addReg(MBBI->getOperand(1).getReg()) + .addImm(MBBI->getOperand(2).getImm() + 4); + } + + // Order: Hi, Lo + if (IsOp1EqualToLo) { + BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), Lo) + .addReg(MBBI->getOperand(1).getReg()) + .add(MBBI->getOperand(2)); + } + + MBBI->eraseFromParent(); + return true; +} + class RISCVPreRAExpandPseudo : public MachineFunctionPass { public: const RISCVInstrInfo *TII; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h @@ -48,7 +48,11 @@ bool SelectAddrFrameIndex(SDValue Addr, SDValue &Base, SDValue &Offset); bool SelectFrameAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset); - bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset); + bool SelectAddrRegImm(SDValue Addr, SDValue &Base, SDValue &Offset, + bool IsINX = false); + bool SelectAddrRegImmINX(SDValue Addr, SDValue &Base, SDValue &Offset) { + return SelectAddrRegImm(Addr, Base, Offset, true); + } bool SelectAddrRegRegScale(SDValue Addr, unsigned MaxShiftAmount, SDValue &Base, SDValue &Index, SDValue &Scale); diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -895,7 +895,7 @@ if (Subtarget->is64Bit()) Opc = HasZdinx ? RISCV::COPY : RISCV::FMV_D_X; else - Opc = RISCV::FCVT_D_W; + Opc = HasZdinx ? RISCV::FCVT_D_W_IN32X : RISCV::FCVT_D_W; break; } @@ -2309,7 +2309,7 @@ } bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base, - SDValue &Offset) { + SDValue &Offset, bool IsINX) { if (SelectAddrFrameIndex(Addr, Base, Offset)) return true; @@ -2322,9 +2322,10 @@ return true; } + int64_t RV32ZdinxRange = IsINX ? 4 : 0; if (CurDAG->isBaseWithConstantOffset(Addr)) { int64_t CVal = cast(Addr.getOperand(1))->getSExtValue(); - if (isInt<12>(CVal)) { + if (isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) { Base = Addr.getOperand(0); if (Base.getOpcode() == RISCVISD::ADD_LO) { SDValue LoOperand = Base.getOperand(1); @@ -2358,7 +2359,8 @@ // Handle ADD with large immediates. 
if (Addr.getOpcode() == ISD::ADD && isa(Addr.getOperand(1))) { int64_t CVal = cast(Addr.getOperand(1))->getSExtValue(); - assert(!isInt<12>(CVal) && "simm12 not already handled?"); + assert(!(isInt<12>(CVal) && isInt<12>(CVal + RV32ZdinxRange)) && + "simm12 not already handled?"); // Handle immediates in the range [-4096,-2049] or [2048, 4094]. We can use // an ADDI for part of the offset and fold the rest into the load/store. diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -127,6 +127,8 @@ if (Subtarget.hasStdExtZdinx()) { if (Subtarget.is64Bit()) addRegisterClass(MVT::f64, &RISCV::GPRF64RegClass); + else + addRegisterClass(MVT::f64, &RISCV::GPRPF64RegClass); } static const MVT::SimpleValueType BoolVecVTs[] = { @@ -12842,7 +12844,9 @@ static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const RISCVSubtarget &Subtarget) { - assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); + assert((MI.getOpcode() == RISCV::SplitF64Pseudo || + MI.getOpcode() == RISCV::SplitF64Pseudo_INX) && + "Unexpected instruction"); MachineFunction &MF = *BB->getParent(); DebugLoc DL = MI.getDebugLoc(); @@ -12852,7 +12856,9 @@ Register HiReg = MI.getOperand(1).getReg(); Register SrcReg = MI.getOperand(2).getReg(); - const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; + const TargetRegisterClass *SrcRC = MI.getOpcode() == RISCV::SplitF64Pseudo_INX + ? &RISCV::GPRPF64RegClass + : &RISCV::FPR64RegClass; int FI = MF.getInfo()->getMoveF64FrameIndex(MF); TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, @@ -12877,7 +12883,8 @@ static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, MachineBasicBlock *BB, const RISCVSubtarget &Subtarget) { - assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && + assert((MI.getOpcode() == RISCV::BuildPairF64Pseudo || + MI.getOpcode() == RISCV::BuildPairF64Pseudo_INX) && "Unexpected instruction"); MachineFunction &MF = *BB->getParent(); @@ -12888,7 +12895,9 @@ Register LoReg = MI.getOperand(1).getReg(); Register HiReg = MI.getOperand(2).getReg(); - const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; + const TargetRegisterClass *DstRC = + MI.getOpcode() == RISCV::BuildPairF64Pseudo_INX ? 
&RISCV::GPRPF64RegClass + : &RISCV::FPR64RegClass; int FI = MF.getInfo()->getMoveF64FrameIndex(MF); MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); @@ -12922,6 +12931,7 @@ case RISCV::Select_FPR32INX_Using_CC_GPR: case RISCV::Select_FPR64_Using_CC_GPR: case RISCV::Select_FPR64INX_Using_CC_GPR: + case RISCV::Select_FPR64IN32X_Using_CC_GPR: return true; } } @@ -13432,10 +13442,13 @@ case RISCV::Select_FPR32INX_Using_CC_GPR: case RISCV::Select_FPR64_Using_CC_GPR: case RISCV::Select_FPR64INX_Using_CC_GPR: + case RISCV::Select_FPR64IN32X_Using_CC_GPR: return emitSelectPseudo(MI, BB, Subtarget); case RISCV::BuildPairF64Pseudo: + case RISCV::BuildPairF64Pseudo_INX: return emitBuildPairF64Pseudo(MI, BB, Subtarget); case RISCV::SplitF64Pseudo: + case RISCV::SplitF64Pseudo_INX: return emitSplitF64Pseudo(MI, BB, Subtarget); case RISCV::PseudoQuietFLE_H: return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget); @@ -13457,10 +13470,16 @@ return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget); case RISCV::PseudoQuietFLE_D_INX: return emitQuietFCMP(MI, BB, RISCV::FLE_D_INX, RISCV::FEQ_D_INX, Subtarget); + case RISCV::PseudoQuietFLE_D_IN32X: + return emitQuietFCMP(MI, BB, RISCV::FLE_D_IN32X, RISCV::FEQ_D_IN32X, + Subtarget); case RISCV::PseudoQuietFLT_D: return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget); case RISCV::PseudoQuietFLT_D_INX: return emitQuietFCMP(MI, BB, RISCV::FLT_D_INX, RISCV::FEQ_D_INX, Subtarget); + case RISCV::PseudoQuietFLT_D_IN32X: + return emitQuietFCMP(MI, BB, RISCV::FLT_D_IN32X, RISCV::FEQ_D_IN32X, + Subtarget); // ========================================================================= // VFCVT @@ -13646,6 +13665,7 @@ case RISCV::PseudoFROUND_S_INX: case RISCV::PseudoFROUND_D: case RISCV::PseudoFROUND_D_INX: + case RISCV::PseudoFROUND_D_IN32X: return emitFROUND(MI, BB, Subtarget); } } @@ -15507,7 +15527,8 @@ // Subtarget into account. 
if (Res.second == &RISCV::GPRF16RegClass || Res.second == &RISCV::GPRF32RegClass || - Res.second == &RISCV::GPRF64RegClass) + Res.second == &RISCV::GPRF64RegClass || + Res.second == &RISCV::GPRPF64RegClass) return std::make_pair(Res.first, &RISCV::GPRRegClass); return Res; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -297,6 +297,13 @@ MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const { + const TargetRegisterInfo *TRI = STI.getRegisterInfo(); + + if (RISCV::GPRPF64RegClass.contains(DstReg)) + DstReg = TRI->getSubReg(DstReg, RISCV::sub_32); + if (RISCV::GPRPF64RegClass.contains(SrcReg)) + SrcReg = TRI->getSubReg(SrcReg, RISCV::sub_32); + if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) { BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg) .addReg(SrcReg, getKillRegState(KillSrc)) @@ -307,10 +314,9 @@ // Handle copy from csr if (RISCV::VCSRRegClass.contains(SrcReg) && RISCV::GPRRegClass.contains(DstReg)) { - const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg) - .addImm(RISCVSysReg::lookupSysRegByName(TRI.getName(SrcReg))->Encoding) - .addReg(RISCV::X0); + .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding) + .addReg(RISCV::X0); return; } @@ -323,7 +329,6 @@ if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) { if (!STI.hasStdExtZfh() && STI.hasStdExtZfhmin()) { // Zfhmin subset doesn't have FSGNJ_H, replaces FSGNJ_H with FSGNJ_S. - const TargetRegisterInfo *TRI = STI.getRegisterInfo(); DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16, &RISCV::FPR32RegClass); SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16, @@ -460,8 +465,6 @@ MIB.addReg(RISCV::VTYPE, RegState::Implicit); } } else { - const TargetRegisterInfo *TRI = STI.getRegisterInfo(); - int I = 0, End = NF, Incr = 1; unsigned SrcEncoding = TRI->getEncodingValue(SrcReg); unsigned DstEncoding = TRI->getEncodingValue(DstReg); @@ -518,6 +521,9 @@ Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW : RISCV::SD; IsScalableVector = false; + } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) { + Opcode = RISCV::PseudoRV32ZdinxSD; + IsScalableVector = false; } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) { Opcode = RISCV::FSH; IsScalableVector = false; @@ -602,6 +608,9 @@ Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW : RISCV::LD; IsScalableVector = false; + } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) { + Opcode = RISCV::PseudoRV32ZdinxLD; + IsScalableVector = false; } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) { Opcode = RISCV::FLH; IsScalableVector = false; @@ -1272,6 +1281,7 @@ case RISCV::FSGNJ_S: case RISCV::FSGNJ_H: case RISCV::FSGNJ_D_INX: + case RISCV::FSGNJ_D_IN32X: case RISCV::FSGNJ_S_INX: case RISCV::FSGNJ_H_INX: // The canonical floating-point move is fsgnj rd, rs, rs. @@ -1304,6 +1314,7 @@ case RISCV::FSGNJ_S: case RISCV::FSGNJ_H: case RISCV::FSGNJ_D_INX: + case RISCV::FSGNJ_D_IN32X: case RISCV::FSGNJ_S_INX: case RISCV::FSGNJ_H_INX: // The canonical floating-point move is fsgnj rd, rs, rs. 
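A minimal sketch of how the new RV32 Zdinx pseudos are expected to expand (based on expandRV32ZdinxStore/expandRV32ZdinxLoad above; the register pair, base register, and offset below are illustrative, not taken from the patch):

  # PseudoRV32ZdinxSD <a0,a1 pair>, a2, 8   would expand to
  sw a0, 8(a2)    # low 32 bits of the f64
  sw a1, 12(a2)   # high 32 bits at offset+4, hence the isInt<12>(imm + 4) checks

  # PseudoRV32ZdinxLD <a0,a1 pair>, a2, 8   would expand to
  lw a0, 8(a2)    # low half
  lw a1, 12(a2)   # high half; loaded first instead when the base register aliases
                  #   the low destination, so the base is not overwritten too early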
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td @@ -25,6 +25,8 @@ def RISCVBuildPairF64 : SDNode<"RISCVISD::BuildPairF64", SDT_RISCVBuildPairF64>; def RISCVSplitF64 : SDNode<"RISCVISD::SplitF64", SDT_RISCVSplitF64>; +def AddrRegImmINX : ComplexPattern<iPTR, 2, "SelectAddrRegImmINX">; + //===----------------------------------------------------------------------===// // Operand and SDNode transformation definitions. //===----------------------------------------------------------------------===// @@ -80,8 +82,6 @@ def XD_64 : ExtInfo_rr; defvar DINX = [D, D_INX, D_IN32X]; -// TODO: Remove DIN64X when Zdinx for RV32 supported -defvar DIN64X = [D, D_INX]; defvar DDINX = [DD, DD_INX, DD_IN32X]; defvar DXINX = [DX, DX_INX, DX_IN32X]; defvar DFINX = [DF, DF_INX, DF_IN32X]; @@ -234,6 +234,10 @@ (FLT_D_IN32X GPR:$rd, FPR64IN32X:$rt, FPR64IN32X:$rs), 0>; def : InstAlias<"fge.d $rd, $rs, $rt", (FLE_D_IN32X GPR:$rd, FPR64IN32X:$rt, FPR64IN32X:$rs), 0>; +let usesCustomInserter = 1 in { +def PseudoQuietFLE_D_IN32X : PseudoQuietFCMP<FPR64IN32X>; +def PseudoQuietFLT_D_IN32X : PseudoQuietFCMP<FPR64IN32X>; +} } // Predicates = [HasStdExtZdinx, IsRV32] //===----------------------------------------------------------------------===// @@ -257,6 +261,14 @@ def : Pat<(any_fpextend FPR32INX:$rs1), (FCVT_D_S_INX FPR32INX:$rs1)>; } // Predicates = [HasStdExtZdinx, IsRV64] +let Predicates = [HasStdExtZdinx, IsRV32] in { +/// Float conversion operations + +// f64 -> f32, f32 -> f64 +def : Pat<(any_fpround FPR64IN32X:$rs1), (FCVT_S_D_IN32X FPR64IN32X:$rs1, FRM_DYN)>; +def : Pat<(any_fpextend FPR32INX:$rs1), (FCVT_D_S_IN32X FPR32INX:$rs1)>; +} // Predicates = [HasStdExtZdinx, IsRV32] + // [u]int<->double conversion patterns must be gated on IsRV32 or IsRV64, so // are defined later.
@@ -339,6 +351,43 @@ (FNMADD_D_INX FPR64INX:$rs1, FPR64INX:$rs2, FPR64INX:$rs3, FRM_DYN)>; } // Predicates = [HasStdExtZdinx, IsRV64] +let Predicates = [HasStdExtZdinx, IsRV32] in { +def : Pat<(any_fsqrt FPR64IN32X:$rs1), (FSQRT_D_IN32X FPR64IN32X:$rs1, FRM_DYN)>; + +def : Pat<(fneg FPR64IN32X:$rs1), (FSGNJN_D_IN32X $rs1, $rs1)>; +def : Pat<(fabs FPR64IN32X:$rs1), (FSGNJX_D_IN32X $rs1, $rs1)>; + +def : Pat<(riscv_fpclass FPR64IN32X:$rs1), (FCLASS_D_IN32X $rs1)>; + +def : PatFprFpr; +def : Pat<(fcopysign FPR64IN32X:$rs1, (fneg FPR64IN32X:$rs2)), + (FSGNJN_D_IN32X $rs1, $rs2)>; +def : Pat<(fcopysign FPR64IN32X:$rs1, FPR32INX:$rs2), + (FSGNJ_D_IN32X $rs1, (FCVT_D_S_INX $rs2))>; +def : Pat<(fcopysign FPR32INX:$rs1, FPR64IN32X:$rs2), + (FSGNJ_S_INX $rs1, (FCVT_S_D_IN32X $rs2, FRM_DYN))>; + +// fmadd: rs1 * rs2 + rs3 +def : Pat<(any_fma FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3), + (FMADD_D_IN32X $rs1, $rs2, $rs3, FRM_DYN)>; + +// fmsub: rs1 * rs2 - rs3 +def : Pat<(any_fma FPR64IN32X:$rs1, FPR64IN32X:$rs2, (fneg FPR64IN32X:$rs3)), + (FMSUB_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3, FRM_DYN)>; + +// fnmsub: -rs1 * rs2 + rs3 +def : Pat<(any_fma (fneg FPR64IN32X:$rs1), FPR64IN32X:$rs2, FPR64IN32X:$rs3), + (FNMSUB_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3, FRM_DYN)>; + +// fnmadd: -rs1 * rs2 - rs3 +def : Pat<(any_fma (fneg FPR64IN32X:$rs1), FPR64IN32X:$rs2, (fneg FPR64IN32X:$rs3)), + (FNMADD_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3, FRM_DYN)>; + +// fnmadd: -(rs1 * rs2 + rs3) (the nsz flag on the FMA) +def : Pat<(fneg (any_fma_nsz FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3)), + (FNMADD_D_IN32X FPR64IN32X:$rs1, FPR64IN32X:$rs2, FPR64IN32X:$rs3, FRM_DYN)>; +} // Predicates = [HasStdExtZdinx, IsRV32] + // The ratified 20191213 ISA spec defines fmin and fmax in a way that matches // LLVM's fminnum and fmaxnum. // . @@ -352,10 +401,10 @@ // Match non-signaling FEQ_D defm : PatSetCC_m; defm : PatSetCC_m; -defm : PatSetCC_m; -defm : PatSetCC_m; -defm : PatSetCC_m; -defm : PatSetCC_m; +defm : PatSetCC_m; +defm : PatSetCC_m; +defm : PatSetCC_m; +defm : PatSetCC_m; let Predicates = [HasStdExtD] in { // Match signaling FEQ_D @@ -397,6 +446,26 @@ def : PatSetCC; } // Predicates = [HasStdExtZdinx, IsRV64] +let Predicates = [HasStdExtZdinx, IsRV32] in { +// Match signaling FEQ_D +def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETEQ), + (AND (FLE_D_IN32X $rs1, $rs2), + (FLE_D_IN32X $rs2, $rs1))>; +def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs2, SETOEQ), + (AND (FLE_D_IN32X $rs1, $rs2), + (FLE_D_IN32X $rs2, $rs1))>; +// If both operands are the same, use a single FLE. 
+def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETEQ), + (FLE_D_IN32X $rs1, $rs1)>; +def : Pat<(strict_fsetccs FPR64IN32X:$rs1, FPR64IN32X:$rs1, SETOEQ), + (FLE_D_IN32X $rs1, $rs1)>; + +def : PatSetCC; +def : PatSetCC; +def : PatSetCC; +def : PatSetCC; +} // Predicates = [HasStdExtZdinx, IsRV32] + let Predicates = [HasStdExtD] in { defm Select_FPR64 : SelectCC_GPR_rrirr; @@ -440,6 +509,38 @@ (SD (COPY_TO_REGCLASS FPR64INX:$rs2, GPR), GPR:$rs1, simm12:$imm12)>; } // Predicates = [HasStdExtZdinx, IsRV64] +let Predicates = [HasStdExtZdinx, IsRV32] in { +defm Select_FPR64IN32X : SelectCC_GPR_rrirr; + +def PseudoFROUND_D_IN32X : PseudoFROUND; + +/// Loads +let isCall = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 1 in +def PseudoRV32ZdinxLD : Pseudo<(outs GPRPF64:$dst), (ins GPR:$rs1, simm12:$imm12), []>; +def : Pat<(f64 (load (AddrRegImmINX GPR:$rs1, simm12:$imm12))), + (PseudoRV32ZdinxLD GPR:$rs1, simm12:$imm12)>; + +/// Stores +let isCall = 0, mayLoad = 0, mayStore = 1, Size = 8, isCodeGenOnly = 1 in +def PseudoRV32ZdinxSD : Pseudo<(outs), (ins GPRPF64:$rs2, GPRNoX0:$rs1, simm12:$imm12), []>; +def : Pat<(store (f64 GPRPF64:$rs2), (AddrRegImmINX GPR:$rs1, simm12:$imm12)), + (PseudoRV32ZdinxSD GPRPF64:$rs2, GPR:$rs1, simm12:$imm12)>; + +/// Pseudo-instructions needed for the soft-float ABI with RV32D + +// Moves two GPRs to an FPR. +let usesCustomInserter = 1 in +def BuildPairF64Pseudo_INX + : Pseudo<(outs FPR64IN32X:$dst), (ins GPR:$src1, GPR:$src2), + [(set FPR64IN32X:$dst, (RISCVBuildPairF64 GPR:$src1, GPR:$src2))]>; + +// Moves an FPR to two GPRs. +let usesCustomInserter = 1 in +def SplitF64Pseudo_INX + : Pseudo<(outs GPR:$dst1, GPR:$dst2), (ins FPR64IN32X:$src), + [(set GPR:$dst1, GPR:$dst2, (RISCVSplitF64 FPR64IN32X:$src))]>; +} // Predicates = [HasStdExtZdinx, IsRV32] + let Predicates = [HasStdExtD, IsRV32] in { // double->[u]int. Round-to-zero must be used. @@ -461,6 +562,27 @@ def : Pat<(any_uint_to_fp (i32 GPR:$rs1)), (FCVT_D_WU GPR:$rs1)>; } // Predicates = [HasStdExtD, IsRV32] +let Predicates = [HasStdExtZdinx, IsRV32] in { + +// double->[u]int. Round-to-zero must be used. +def : Pat<(i32 (any_fp_to_sint FPR64IN32X:$rs1)), (FCVT_W_D_IN32X FPR64IN32X:$rs1, FRM_RTZ)>; +def : Pat<(i32 (any_fp_to_uint FPR64IN32X:$rs1)), (FCVT_WU_D_IN32X FPR64IN32X:$rs1, FRM_RTZ)>; + +// Saturating double->[u]int32. +def : Pat<(i32 (riscv_fcvt_x FPR64IN32X:$rs1, timm:$frm)), (FCVT_W_D_IN32X $rs1, timm:$frm)>; +def : Pat<(i32 (riscv_fcvt_xu FPR64IN32X:$rs1, timm:$frm)), (FCVT_WU_D_IN32X $rs1, timm:$frm)>; + +// float->int32 with current rounding mode. +def : Pat<(i32 (any_lrint FPR64IN32X:$rs1)), (FCVT_W_D_IN32X $rs1, FRM_DYN)>; + +// float->int32 rounded to nearest with ties rounded away from zero. +def : Pat<(i32 (any_lround FPR64IN32X:$rs1)), (FCVT_W_D_IN32X $rs1, FRM_RMM)>; + +// [u]int->double. 
+def : Pat<(any_sint_to_fp (i32 GPR:$rs1)), (FCVT_D_W_IN32X GPR:$rs1)>; +def : Pat<(any_uint_to_fp (i32 GPR:$rs1)), (FCVT_D_WU_IN32X GPR:$rs1)>; +} // Predicates = [HasStdExtZdinx, IsRV32] + let Predicates = [HasStdExtD, IsRV64] in { // Moves (no conversion) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td @@ -559,6 +559,18 @@ def : Pat<(fcopysign FPR64:$rs1, FPR16:$rs2), (FSGNJ_D $rs1, (FCVT_D_H $rs2))>; } // Predicates = [HasStdExtZfhOrZfhmin, HasStdExtD] +let Predicates = [HasStdExtZhinxOrZhinxmin, HasStdExtZdinx, IsRV32] in { +/// Float conversion operations +// f64 -> f16, f16 -> f64 +def : Pat<(any_fpround FPR64IN32X:$rs1), (FCVT_H_D_IN32X FPR64IN32X:$rs1, FRM_DYN)>; +def : Pat<(any_fpextend FPR16INX:$rs1), (FCVT_D_H_IN32X FPR16INX:$rs1)>; + +/// Float arithmetic operations +def : Pat<(fcopysign FPR16INX:$rs1, FPR64IN32X:$rs2), + (FSGNJ_H_INX $rs1, (FCVT_H_D_IN32X $rs2, 0b111))>; +def : Pat<(fcopysign FPR64IN32X:$rs1, FPR16INX:$rs2), (FSGNJ_D_IN32X $rs1, (FCVT_D_H_IN32X $rs2))>; +} // Predicates = [HasStdExtZhinxOrZhinxmin, HasStdExtZdinx, IsRV32] + let Predicates = [HasStdExtZhinxOrZhinxmin, HasStdExtZdinx, IsRV64] in { /// Float conversion operations // f64 -> f16, f16 -> f64 diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll --- a/llvm/test/CodeGen/RISCV/double-arith-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll @@ -5,6 +5,9 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -disable-strictnode-mutation -target-abi=lp64d \ ; RUN: | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation -target-abi=ilp32 \ +; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -disable-strictnode-mutation -target-abi=lp64 \ ; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s @@ -19,6 +22,25 @@ ; CHECKIFD-NEXT: fadd.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fadd_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fadd_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a1 @@ -52,6 +74,25 @@ ; CHECKIFD-NEXT: fsub.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fsub_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fsub.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw 
a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fsub_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fsub.d a0, a0, a1 @@ -85,6 +126,25 @@ ; CHECKIFD-NEXT: fmul.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmul_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmul.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmul_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmul.d a0, a0, a1 @@ -118,6 +178,25 @@ ; CHECKIFD-NEXT: fdiv.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fdiv_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fdiv.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fdiv_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fdiv.d a0, a0, a1 @@ -151,6 +230,21 @@ ; CHECKIFD-NEXT: fsqrt.d fa0, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fsqrt_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fsqrt_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fsqrt.d a0, a0 @@ -197,6 +291,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmin_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call fmin@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmin_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -247,6 +350,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmax_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call fmax@plt +; 
RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmax_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -284,6 +396,29 @@ ; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmadd_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmadd_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2 @@ -326,6 +461,31 @@ ; RV64IFD-NEXT: fmsub.d fa0, fa0, fa1, fa5 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmsub_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6 +; RV32IZFINXZDINX-NEXT: fmsub.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmsub_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a2, a2, zero @@ -410,6 +570,32 @@ ; RV64IFD-NEXT: fnmadd.d fa0, fa4, fa1, fa5 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmadd_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6 +; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6 +; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmadd_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, 
a0, zero @@ -513,6 +699,32 @@ ; RV64IFD-NEXT: fnmadd.d fa0, fa4, fa0, fa5 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmadd_d_2: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6 +; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6 +; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a2, a0, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmadd_d_2: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a1, a1, zero @@ -615,6 +827,31 @@ ; RV64IFD-NEXT: fnmsub.d fa0, fa5, fa1, fa2 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmsub_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6 +; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmsub_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, zero @@ -693,6 +930,31 @@ ; RV64IFD-NEXT: fnmsub.d fa0, fa5, fa0, fa2 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmsub_d_2: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6 +; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a2, a0, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmsub_d_2: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a1, a1, zero diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll --- 
a/llvm/test/CodeGen/RISCV/double-arith.ll +++ b/llvm/test/CodeGen/RISCV/double-arith.ll @@ -3,6 +3,8 @@ ; RUN: -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefix=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64 | FileCheck -check-prefix=RV64IZFINXZDINX %s ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ @@ -21,6 +23,25 @@ ; CHECKIFD-NEXT: fadd.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fadd_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fadd_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a1 @@ -53,6 +74,25 @@ ; CHECKIFD-NEXT: fsub.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fsub_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fsub.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fsub_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fsub.d a0, a0, a1 @@ -85,6 +125,25 @@ ; CHECKIFD-NEXT: fmul.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmul_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmul.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmul_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmul.d a0, a0, a1 @@ -117,6 +176,25 @@ ; CHECKIFD-NEXT: fdiv.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fdiv_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 
8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fdiv.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fdiv_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fdiv.d a0, a0, a1 @@ -151,6 +229,21 @@ ; CHECKIFD-NEXT: fsqrt.d fa0, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fsqrt_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fsqrt_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fsqrt.d a0, a0 @@ -185,6 +278,25 @@ ; CHECKIFD-NEXT: fsgnj.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fsgnj_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fsgnj.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fsgnj_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fsgnj.d a0, a0, a1 @@ -221,6 +333,19 @@ ; CHECKIFD-NEXT: feq.d a0, fa5, fa4 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fneg_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: fneg.d a2, a0 +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fneg_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a0 @@ -274,6 +399,25 @@ ; CHECKIFD-NEXT: fsgnjn.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fsgnjn_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fsgnjn.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: 
ret +; ; RV64IZFINXZDINX-LABEL: fsgnjn_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: li a2, -1 @@ -318,6 +462,27 @@ ; CHECKIFD-NEXT: fadd.d fa0, fa4, fa5 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fabs_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: fabs.d a2, a0 +; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fabs_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a1 @@ -365,6 +530,25 @@ ; CHECKIFD-NEXT: fmin.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmin_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmin_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmin.d a0, a0, a1 @@ -399,6 +583,25 @@ ; CHECKIFD-NEXT: fmax.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmax_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmax_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmax.d a0, a0, a1 @@ -433,6 +636,29 @@ ; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmadd_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; 
RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmadd_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2 @@ -474,6 +700,31 @@ ; RV64IFD-NEXT: fmsub.d fa0, fa0, fa1, fa5 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmsub_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6 +; RV32IZFINXZDINX-NEXT: fmsub.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmsub_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a2, a2, zero @@ -558,6 +809,32 @@ ; RV64IFD-NEXT: fnmadd.d fa0, fa4, fa1, fa5 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmadd_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6 +; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6 +; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmadd_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, zero @@ -661,6 +938,32 @@ ; RV64IFD-NEXT: fnmadd.d fa0, fa4, fa0, fa5 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmadd_d_2: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6 +; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6 +; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a2, a0, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; 
RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmadd_d_2: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a1, a1, zero @@ -755,6 +1058,31 @@ ; CHECKIFD-NEXT: fneg.d fa0, fa5 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmadd_d_3: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lui a2, 524288 +; RV32IZFINXZDINX-NEXT: xor a1, a1, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmadd_d_3: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2 @@ -797,6 +1125,31 @@ ; CHECKIFD-NEXT: fnmadd.d fa0, fa0, fa1, fa2 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmadd_nsz: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lui a2, 524288 +; RV32IZFINXZDINX-NEXT: xor a1, a1, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmadd_nsz: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2 @@ -847,6 +1200,31 @@ ; RV64IFD-NEXT: fnmsub.d fa0, fa5, fa1, fa2 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmsub_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6 +; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmsub_d: ; RV64IZFINXZDINX: # %bb.0: ; 
RV64IZFINXZDINX-NEXT: fadd.d a0, a0, zero @@ -925,6 +1303,31 @@ ; RV64IFD-NEXT: fnmsub.d fa0, fa5, fa0, fa2 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmsub_d_2: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6 +; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a2, a0, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmsub_d_2: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a1, a1, zero @@ -998,6 +1401,29 @@ ; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmadd_d_contract: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmadd_d_contract: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2 @@ -1054,6 +1480,31 @@ ; RV64IFD-NEXT: fmsub.d fa0, fa0, fa1, fa5 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmsub_d_contract: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6 +; RV32IZFINXZDINX-NEXT: fmsub.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmsub_d_contract: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a2, a2, zero @@ -1148,6 +1599,33 @@ ; RV64IFD-NEXT: fnmadd.d fa0, fa4, fa3, fa5 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmadd_d_contract: +; RV32IZFINXZDINX: # %bb.0: +; 
RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6 +; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6 +; RV32IZFINXZDINX-NEXT: fadd.d a4, a4, a6 +; RV32IZFINXZDINX-NEXT: fnmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmadd_d_contract: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, zero @@ -1269,6 +1747,32 @@ ; RV64IFD-NEXT: fnmsub.d fa0, fa4, fa5, fa2 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fnmsub_d_contract: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a6, zero +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a6 +; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a6 +; RV32IZFINXZDINX-NEXT: fnmsub.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fnmsub_d_contract: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, zero diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll --- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll @@ -3,6 +3,8 @@ ; RUN: | FileCheck -check-prefix=RV32I %s ; RUN: llc -mtriple=riscv32 -target-abi=ilp32 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv32 -target-abi=ilp32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -target-abi=lp64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s ; RUN: llc -mtriple=riscv64 -target-abi=lp64 -mattr=+d -verify-machineinstrs < %s \ @@ -32,6 +34,12 @@ ; RV32IFD-NEXT: xor a1, a1, a2 ; RV32IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fneg: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: lui a2, 524288 +; RV32IZFINXZDINX-NEXT: xor a1, a1, a2 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64I-LABEL: fneg: ; RV64I: # %bb.0: ; RV64I-NEXT: li a1, -1 @@ -71,6 +79,12 @@ ; RV32IFD-NEXT: srli a1, a1, 1 ; RV32IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fabs: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: slli a1, a1, 1 +; RV32IZFINXZDINX-NEXT: srli a1, a1, 1 +; 
RV32IZFINXZDINX-NEXT: ret +; ; RV64I-LABEL: fabs: ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 1 @@ -125,6 +139,25 @@ ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcopysign_fneg: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fsgnjn.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64I-LABEL: fcopysign_fneg: ; RV64I: # %bb.0: ; RV64I-NEXT: not a1, a1 diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll @@ -3,6 +3,8 @@ ; RUN: -target-abi=ilp32d | FileCheck -check-prefix=RV32IFD %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64d | FileCheck -check-prefix=RV64IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefix=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64 | FileCheck -check-prefix=RV64IZFINXZDINX %s @@ -32,6 +34,17 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_false: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: li a0, 1 +; RV32IZFINXZDINX-NEXT: bnez a0, .LBB0_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.then +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB0_2: # %if.else +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_false: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: li a0, 1 @@ -74,6 +87,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_oeq: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: bnez a0, .LBB1_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB1_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_oeq: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a1 @@ -119,6 +153,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_oeq_alt: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 
+; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: bnez a0, .LBB2_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB2_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_oeq_alt: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a1 @@ -161,6 +216,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_ogt: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: bnez a0, .LBB3_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB3_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_ogt: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 @@ -203,6 +279,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_oge: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: bnez a0, .LBB4_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB4_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_oge: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a0, a1, a0 @@ -245,6 +342,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_olt: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: bnez a0, .LBB5_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; 
RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB5_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_olt: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a0, a0, a1 @@ -287,6 +405,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_ole: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: bnez a0, .LBB6_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB6_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_ole: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1 @@ -333,6 +472,29 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_one: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2 +; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: or a0, a0, a4 +; RV32IZFINXZDINX-NEXT: bnez a0, .LBB7_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB7_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_one: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a2, a0, a1 @@ -381,6 +543,29 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_ord: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2 +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: bnez a0, .LBB8_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB8_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_ord: ; RV64IZFINXZDINX: # %bb.0: 
; RV64IZFINXZDINX-NEXT: feq.d a1, a1, a1 @@ -429,6 +614,29 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_ueq: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2 +; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: or a0, a0, a4 +; RV32IZFINXZDINX-NEXT: beqz a0, .LBB9_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB9_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_ueq: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a2, a0, a1 @@ -473,6 +681,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_ugt: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: beqz a0, .LBB10_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB10_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_ugt: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1 @@ -515,6 +744,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_uge: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: beqz a0, .LBB11_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB11_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_uge: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a0, a0, a1 @@ -557,6 +807,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_ult: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; 
RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: beqz a0, .LBB12_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB12_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_ult: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a0, a1, a0 @@ -599,6 +870,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_ule: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: beqz a0, .LBB13_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB13_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_ule: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 @@ -641,6 +933,27 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_une: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: beqz a0, .LBB14_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB14_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_une: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a1 @@ -687,6 +1000,29 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_uno: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2 +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: beqz a0, .LBB15_2 +; 
RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB15_2: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_uno: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: feq.d a1, a1, a1 @@ -731,6 +1067,17 @@ ; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IFD-NEXT: call abort@plt ; +; RV32IZFINXZDINX-LABEL: br_fcmp_true: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: li a0, 1 +; RV32IZFINXZDINX-NEXT: bnez a0, .LBB16_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: ret +; RV32IZFINXZDINX-NEXT: .LBB16_2: # %if.then +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call abort@plt +; ; RV64IZFINXZDINX-LABEL: br_fcmp_true: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: li a0, 1 diff --git a/llvm/test/CodeGen/RISCV/double-calling-conv.ll b/llvm/test/CodeGen/RISCV/double-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/double-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/double-calling-conv.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi=ilp32 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -target-abi=ilp32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s ; Basic correctness checks for calling convention lowering for RV32D. This can ; be somewhat error-prone for soft-float RV32D due to the fact that f64 is legal @@ -23,6 +25,25 @@ ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV32IZFINXZDINX-LABEL: callee_double_inreg: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret %1 = fadd double %a, %b ret double %1 } @@ -45,6 +66,22 @@ ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV32IZFINXZDINX-LABEL: caller_double_inreg: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: lui a0, 262236 +; RV32IZFINXZDINX-NEXT: addi a1, a0, 655 +; RV32IZFINXZDINX-NEXT: lui a0, 377487 +; RV32IZFINXZDINX-NEXT: addi a0, a0, 1475 +; RV32IZFINXZDINX-NEXT: lui a2, 262364 +; RV32IZFINXZDINX-NEXT: addi a3, a2, 655 +; RV32IZFINXZDINX-NEXT: mv a2, a0 +; RV32IZFINXZDINX-NEXT: call callee_double_inreg@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret %1 = call double @callee_double_inreg(double 2.720000e+00, double 3.720000e+00) ret double %1 } @@ -66,6 +103,26 @@ ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; 
RV32IZFINXZDINX-LABEL: callee_double_split_reg_stack: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: lw a0, 16(sp) +; RV32IZFINXZDINX-NEXT: sw a7, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a6, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret %1 = fadd double %d, %e ret double %1 } @@ -92,6 +149,28 @@ ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV32IZFINXZDINX-LABEL: caller_double_split_reg_stack: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: lui a0, 262510 +; RV32IZFINXZDINX-NEXT: addi a2, a0, 327 +; RV32IZFINXZDINX-NEXT: lui a0, 262446 +; RV32IZFINXZDINX-NEXT: addi a6, a0, 327 +; RV32IZFINXZDINX-NEXT: lui a0, 713032 +; RV32IZFINXZDINX-NEXT: addi a5, a0, -1311 +; RV32IZFINXZDINX-NEXT: li a0, 1 +; RV32IZFINXZDINX-NEXT: li a1, 2 +; RV32IZFINXZDINX-NEXT: li a3, 3 +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: li a2, 0 +; RV32IZFINXZDINX-NEXT: li a4, 0 +; RV32IZFINXZDINX-NEXT: mv a7, a5 +; RV32IZFINXZDINX-NEXT: call callee_double_split_reg_stack@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret %1 = call double @callee_double_split_reg_stack(i32 1, i64 2, i64 3, double 4.72, double 5.72) ret double %1 } @@ -108,6 +187,21 @@ ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret +; +; RV32IZFINXZDINX-LABEL: callee_double_stack: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: lw a0, 24(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 28(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 16(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 20(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret %1 = fadd double %e, %f ret double %1 } @@ -139,6 +233,33 @@ ; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret +; +; RV32IZFINXZDINX-LABEL: caller_double_stack: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: lui a0, 262510 +; RV32IZFINXZDINX-NEXT: addi a0, a0, 327 +; RV32IZFINXZDINX-NEXT: sw a0, 4(sp) +; RV32IZFINXZDINX-NEXT: lui a0, 713032 +; RV32IZFINXZDINX-NEXT: addi a1, a0, -1311 +; RV32IZFINXZDINX-NEXT: sw a1, 0(sp) +; RV32IZFINXZDINX-NEXT: lui a0, 262574 +; RV32IZFINXZDINX-NEXT: addi a0, a0, 327 +; RV32IZFINXZDINX-NEXT: sw a0, 12(sp) +; RV32IZFINXZDINX-NEXT: li a0, 1 +; RV32IZFINXZDINX-NEXT: li a2, 2 +; RV32IZFINXZDINX-NEXT: li a4, 3 +; RV32IZFINXZDINX-NEXT: li a6, 4 +; RV32IZFINXZDINX-NEXT: sw a1, 8(sp) +; RV32IZFINXZDINX-NEXT: li a1, 0 +; RV32IZFINXZDINX-NEXT: li a3, 0 +; RV32IZFINXZDINX-NEXT: li a5, 0 +; RV32IZFINXZDINX-NEXT: 
li a7, 0 +; RV32IZFINXZDINX-NEXT: call callee_double_stack@plt +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret %1 = call double @callee_double_stack(i64 1, i64 2, i64 3, i64 4, double 5.72, double 6.72) ret double %1 } @@ -147,5 +268,9 @@ ; RV32IFD-LABEL: func_return_double_undef: ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: ret +; +; RV32IZFINXZDINX-LABEL: func_return_double_undef: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: ret ret double undef } diff --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll --- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll @@ -5,6 +5,9 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -disable-strictnode-mutation -target-abi=lp64d \ ; RUN: | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation -target-abi=ilp32 \ +; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -disable-strictnode-mutation -target-abi=lp64 \ ; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s @@ -23,6 +26,17 @@ ; CHECKIFD-NEXT: fcvt.s.d fa0, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_s_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.s.d a0, a0 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_s_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.s.d a0, a0 @@ -56,6 +70,17 @@ ; CHECKIFD-NEXT: fcvt.d.s fa0, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_s: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.s a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_s: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.s a0, a0 @@ -89,6 +114,17 @@ ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rtz ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_w_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_w_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz @@ -124,6 +160,17 @@ ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_wu_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_wu_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: 
fcvt.wu.d a0, a0, rtz @@ -161,6 +208,19 @@ ; CHECKIFD-NEXT: add a0, a0, a1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: seqz a1, a0 +; RV32IZFINXZDINX-NEXT: add a0, a0, a1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz @@ -201,6 +261,17 @@ ; CHECKIFD-NEXT: fcvt.d.w fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_w: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_w: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0 @@ -236,6 +307,18 @@ ; CHECKIFD-NEXT: fcvt.d.w fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_w_load: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: lw a0, 0(a0) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_w_load: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lw a0, 0(a0) @@ -272,6 +355,17 @@ ; CHECKIFD-NEXT: fcvt.d.wu fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_wu: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_wu: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 @@ -313,6 +407,18 @@ ; RV64IFD-NEXT: fcvt.d.wu fa0, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_wu_load: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: lw a0, 0(a0) +; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lwu a0, 0(a0) @@ -358,6 +464,15 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_l_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_l_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz @@ 
-400,6 +515,15 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_lu_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_lu_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz @@ -442,6 +566,15 @@ ; RV64IFD-NEXT: fcvt.d.l fa0, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_l: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call __floatdidf@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_l: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.l a0, a0 @@ -484,6 +617,15 @@ ; RV64IFD-NEXT: fcvt.d.lu fa0, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_lu: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call __floatundidf@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_lu: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.lu a0, a0 @@ -517,6 +659,17 @@ ; CHECKIFD-NEXT: fcvt.d.w fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_w_i8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0 @@ -550,6 +703,17 @@ ; CHECKIFD-NEXT: fcvt.d.wu fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 @@ -583,6 +747,17 @@ ; CHECKIFD-NEXT: fcvt.d.w fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_w_i16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0 @@ -616,6 +791,17 @@ ; CHECKIFD-NEXT: fcvt.d.wu fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 +; RV32IZFINXZDINX-NEXT: 
sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 @@ -659,6 +845,14 @@ ; RV64IFD-NEXT: fsd fa5, 0(a1) ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi a0, a0, 1 +; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, a0 +; RV32IZFINXZDINX-NEXT: sw a2, 0(a1) +; RV32IZFINXZDINX-NEXT: sw a3, 4(a1) +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addiw a2, a0, 1 @@ -726,6 +920,14 @@ ; RV64IFD-NEXT: fsd fa5, 0(a1) ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi a0, a0, 1 +; RV32IZFINXZDINX-NEXT: fcvt.d.wu a2, a0 +; RV32IZFINXZDINX-NEXT: sw a2, 0(a1) +; RV32IZFINXZDINX-NEXT: sw a3, 4(a1) +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addiw a0, a0, 1 diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -3,6 +3,8 @@ ; RUN: -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ @@ -16,6 +18,17 @@ ; CHECKIFD-NEXT: fcvt.s.d fa0, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_s_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.s.d a0, a0 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_s_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.s.d a0, a0 @@ -48,6 +61,17 @@ ; CHECKIFD-NEXT: fcvt.d.s fa0, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_s: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.s a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_s: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.s a0, a0 @@ -80,6 +104,17 @@ ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rtz ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_w_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; 
RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_w_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz @@ -116,6 +151,21 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_w_d_sat: +; RV32IZFINXZDINX: # %bb.0: # %start +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rtz +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_w_d_sat: ; RV64IZFINXZDINX: # %bb.0: # %start ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rtz @@ -235,6 +285,17 @@ ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_wu_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_wu_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz @@ -271,6 +332,19 @@ ; CHECKIFD-NEXT: add a0, a0, a1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: seqz a1, a0 +; RV32IZFINXZDINX-NEXT: add a0, a0, a1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_wu_d_multiple_use: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz @@ -326,6 +400,21 @@ ; RV64IFD-NEXT: srli a0, a0, 32 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_wu_d_sat: +; RV32IZFINXZDINX: # %bb.0: # %start +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rtz +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_wu_d_sat: ; RV64IZFINXZDINX: # %bb.0: # %start ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rtz @@ -421,6 +510,17 @@ ; CHECKIFD-NEXT: fcvt.d.w fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_w: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_w: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0 @@ -455,6 +555,18 @@ ; CHECKIFD-NEXT: fcvt.d.w fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: 
fcvt_d_w_load: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: lw a0, 0(a0) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_w_load: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lw a0, 0(a0) @@ -491,6 +603,17 @@ ; CHECKIFD-NEXT: fcvt.d.wu fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_wu: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_wu: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 @@ -531,6 +654,18 @@ ; RV64IFD-NEXT: fcvt.d.wu fa0, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_wu_load: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: lw a0, 0(a0) +; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_wu_load: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lwu a0, 0(a0) @@ -576,6 +711,15 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_l_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_l_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz @@ -650,6 +794,49 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_l_d_sat: +; RV32IZFINXZDINX: # %bb.0: # %start +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI12_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI12_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI12_0)(a2) +; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: lui a5, 524288 +; RV32IZFINXZDINX-NEXT: lui a3, 524288 +; RV32IZFINXZDINX-NEXT: beqz a2, .LBB12_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %start +; RV32IZFINXZDINX-NEXT: mv a3, a1 +; RV32IZFINXZDINX-NEXT: .LBB12_2: # %start +; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI12_1) +; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI12_1)(a1) +; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI12_1+4)(a1) +; RV32IZFINXZDINX-NEXT: flt.d a4, a6, s0 +; RV32IZFINXZDINX-NEXT: beqz a4, .LBB12_4 +; RV32IZFINXZDINX-NEXT: # %bb.3: +; RV32IZFINXZDINX-NEXT: addi a3, a5, -1 +; RV32IZFINXZDINX-NEXT: 
.LBB12_4: # %start +; RV32IZFINXZDINX-NEXT: feq.d a1, s0, s0 +; RV32IZFINXZDINX-NEXT: neg a5, a1 +; RV32IZFINXZDINX-NEXT: and a1, a5, a3 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: and a0, a2, a0 +; RV32IZFINXZDINX-NEXT: neg a2, a4 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a0, a5, a0 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_l_d_sat: ; RV64IZFINXZDINX: # %bb.0: # %start ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rtz @@ -785,6 +972,15 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_lu_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_lu_d: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz @@ -846,6 +1042,35 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_lu_d_sat: +; RV32IZFINXZDINX: # %bb.0: # %start +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero +; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI14_0) +; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI14_0+4)(a4) +; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI14_0)(a4) +; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: and a0, a2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a3, a4, s0 +; RV32IZFINXZDINX-NEXT: neg a3, a3 +; RV32IZFINXZDINX-NEXT: or a0, a3, a0 +; RV32IZFINXZDINX-NEXT: and a1, a2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a3, a1 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_lu_d_sat: ; RV64IZFINXZDINX: # %bb.0: # %start ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rtz @@ -943,6 +1168,25 @@ ; RV64IFD-NEXT: fmv.x.d a0, fa5 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmv_x_d: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 4(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmv_x_d: ; RV64IZFINXZDINX: # %bb.0: ; 
RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a1 @@ -985,6 +1229,15 @@ ; RV64IFD-NEXT: fcvt.d.l fa0, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_l: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call __floatdidf@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_l: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.l a0, a0 @@ -1026,6 +1279,15 @@ ; RV64IFD-NEXT: fcvt.d.lu fa0, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_lu: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call __floatundidf@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_lu: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.lu a0, a0 @@ -1074,6 +1336,25 @@ ; RV64IFD-NEXT: fadd.d fa0, fa5, fa4 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmv_d_x: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw a3, 20(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 16(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 28(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 24(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 16(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 20(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 24(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 28(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmv_d_x: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a1 @@ -1108,6 +1389,17 @@ ; CHECKIFD-NEXT: fcvt.d.w fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_w_i8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_w_i8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0 @@ -1140,6 +1432,17 @@ ; CHECKIFD-NEXT: fcvt.d.wu fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 @@ -1172,6 +1475,17 @@ ; CHECKIFD-NEXT: fcvt.d.w fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_w_i16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.w a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; 
RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_w_i16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.w a0, a0 @@ -1204,6 +1518,17 @@ ; CHECKIFD-NEXT: fcvt.d.wu fa0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_wu_i16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_wu_i16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.d.wu a0, a0 @@ -1246,6 +1571,14 @@ ; RV64IFD-NEXT: fsd fa5, 0(a1) ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi a0, a0, 1 +; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, a0 +; RV32IZFINXZDINX-NEXT: sw a2, 0(a1) +; RV32IZFINXZDINX-NEXT: sw a3, 4(a1) +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_w_demanded_bits: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addiw a2, a0, 1 @@ -1313,6 +1646,14 @@ ; RV64IFD-NEXT: fsd fa5, 0(a1) ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi a0, a0, 1 +; RV32IZFINXZDINX-NEXT: fcvt.d.wu a2, a0 +; RV32IZFINXZDINX-NEXT: sw a2, 0(a1) +; RV32IZFINXZDINX-NEXT: sw a3, 4(a1) +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_d_wu_demanded_bits: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addiw a0, a0, 1 @@ -1373,6 +1714,17 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_w_s_i16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_w_s_i16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz @@ -1428,6 +1780,28 @@ ; RV64IFD-NEXT: and a0, a0, a1 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_w_s_sat_i16: +; RV32IZFINXZDINX: # %bb.0: # %start +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI26_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI26_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI26_0)(a2) +; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI26_1) +; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI26_1+4)(a4) +; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI26_1)(a4) +; RV32IZFINXZDINX-NEXT: fmax.d a2, a0, a2 +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: neg a0, a0 +; RV32IZFINXZDINX-NEXT: fmin.d a2, a2, a4 +; RV32IZFINXZDINX-NEXT: fcvt.w.d a1, a2, rtz +; RV32IZFINXZDINX-NEXT: and a0, a0, a1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_w_s_sat_i16: ; RV64IZFINXZDINX: # %bb.0: # %start ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI26_0) @@ -1557,6 +1931,17 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_wu_s_i16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) 
+; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_wu_s_i16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz @@ -1604,6 +1989,23 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa5, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_wu_s_sat_i16: +; RV32IZFINXZDINX: # %bb.0: # %start +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI28_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI28_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI28_0)(a2) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a4, zero +; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a4 +; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_wu_s_sat_i16: ; RV64IZFINXZDINX: # %bb.0: # %start ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI28_0) @@ -1711,6 +2113,17 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_w_s_i8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_w_s_i8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz @@ -1766,6 +2179,28 @@ ; RV64IFD-NEXT: and a0, a0, a1 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_w_s_sat_i8: +; RV32IZFINXZDINX: # %bb.0: # %start +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI30_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI30_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI30_0)(a2) +; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI30_1) +; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI30_1+4)(a4) +; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI30_1)(a4) +; RV32IZFINXZDINX-NEXT: fmax.d a2, a0, a2 +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: neg a0, a0 +; RV32IZFINXZDINX-NEXT: fmin.d a2, a2, a4 +; RV32IZFINXZDINX-NEXT: fcvt.w.d a1, a2, rtz +; RV32IZFINXZDINX-NEXT: and a0, a0, a1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_w_s_sat_i8: ; RV64IZFINXZDINX: # %bb.0: # %start ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI30_0) @@ -1892,6 +2327,17 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_wu_s_i8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_wu_s_i8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz @@ -1941,6 +2387,23 @@ ; RV64IFD-NEXT: 
fcvt.lu.d a0, fa5, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_wu_s_sat_i8: +; RV32IZFINXZDINX: # %bb.0: # %start +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI32_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI32_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI32_0)(a2) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a4, zero +; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a4 +; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_wu_s_sat_i8: ; RV64IZFINXZDINX: # %bb.0: # %start ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI32_0) @@ -2052,6 +2515,21 @@ ; RV64IFD-NEXT: srli a0, a0, 32 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_wu_d_sat_zext: +; RV32IZFINXZDINX: # %bb.0: # %start +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rtz +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_wu_d_sat_zext: ; RV64IZFINXZDINX: # %bb.0: # %start ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rtz @@ -2152,6 +2630,21 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcvt_w_d_sat_sext: +; RV32IZFINXZDINX: # %bb.0: # %start +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rtz +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcvt_w_d_sat_sext: ; RV64IZFINXZDINX: # %bb.0: # %start ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rtz diff --git a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll --- a/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-fcmp-strict.ll @@ -5,6 +5,9 @@ ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -disable-strictnode-mutation -target-abi=lp64d \ ; RUN: | FileCheck -check-prefix=CHECKIFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation -target-abi=ilp32 \ +; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -disable-strictnode-mutation -target-abi=lp64 \ ; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s @@ -19,6 +22,21 @@ ; CHECKIFD-NEXT: feq.d a0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_oeq: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; 
RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_oeq: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a1 @@ -58,6 +76,24 @@ ; CHECKIFD-NEXT: feq.d zero, fa1, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_ogt: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: csrr a1, fflags +; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a4 +; RV32IZFINXZDINX-NEXT: csrw fflags, a1 +; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_ogt: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: csrr a3, fflags @@ -100,6 +136,24 @@ ; CHECKIFD-NEXT: feq.d zero, fa1, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_oge: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: csrr a1, fflags +; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a4 +; RV32IZFINXZDINX-NEXT: csrw fflags, a1 +; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_oge: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: csrr a3, fflags @@ -144,6 +198,24 @@ ; CHECKIFD-NEXT: feq.d zero, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_olt: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: csrr a1, fflags +; RV32IZFINXZDINX-NEXT: flt.d a0, a4, a2 +; RV32IZFINXZDINX-NEXT: csrw fflags, a1 +; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_olt: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: csrr a3, fflags @@ -186,6 +258,24 @@ ; CHECKIFD-NEXT: feq.d zero, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_ole: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: csrr a1, fflags +; RV32IZFINXZDINX-NEXT: fle.d a0, a4, a2 +; RV32IZFINXZDINX-NEXT: csrw fflags, a1 +; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; 
RV64IZFINXZDINX-LABEL: fcmp_ole: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: csrr a3, fflags @@ -235,6 +325,29 @@ ; CHECKIFD-NEXT: feq.d zero, fa1, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_one: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: csrr a0, fflags +; RV32IZFINXZDINX-NEXT: flt.d a1, a4, a2 +; RV32IZFINXZDINX-NEXT: csrw fflags, a0 +; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2 +; RV32IZFINXZDINX-NEXT: csrr a0, fflags +; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a4 +; RV32IZFINXZDINX-NEXT: csrw fflags, a0 +; RV32IZFINXZDINX-NEXT: or a0, a6, a1 +; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_one: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: csrr a2, fflags @@ -315,6 +428,23 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_ord: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2 +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_ord: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: feq.d a1, a1, a1 @@ -363,6 +493,30 @@ ; CHECKIFD-NEXT: feq.d zero, fa1, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_ueq: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: csrr a0, fflags +; RV32IZFINXZDINX-NEXT: flt.d a1, a4, a2 +; RV32IZFINXZDINX-NEXT: csrw fflags, a0 +; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2 +; RV32IZFINXZDINX-NEXT: csrr a0, fflags +; RV32IZFINXZDINX-NEXT: flt.d a6, a2, a4 +; RV32IZFINXZDINX-NEXT: csrw fflags, a0 +; RV32IZFINXZDINX-NEXT: or a0, a6, a1 +; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_ueq: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: csrr a2, fflags @@ -446,6 +600,25 @@ ; CHECKIFD-NEXT: feq.d zero, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_ugt: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: 
csrr a0, fflags +; RV32IZFINXZDINX-NEXT: fle.d a1, a4, a2 +; RV32IZFINXZDINX-NEXT: csrw fflags, a0 +; RV32IZFINXZDINX-NEXT: xori a0, a1, 1 +; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_ugt: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: csrr a2, fflags @@ -490,6 +663,25 @@ ; CHECKIFD-NEXT: feq.d zero, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_uge: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: csrr a0, fflags +; RV32IZFINXZDINX-NEXT: flt.d a1, a4, a2 +; RV32IZFINXZDINX-NEXT: csrw fflags, a0 +; RV32IZFINXZDINX-NEXT: xori a0, a1, 1 +; RV32IZFINXZDINX-NEXT: feq.d zero, a4, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_uge: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: csrr a2, fflags @@ -536,6 +728,25 @@ ; CHECKIFD-NEXT: feq.d zero, fa1, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_ult: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: csrr a0, fflags +; RV32IZFINXZDINX-NEXT: fle.d a1, a2, a4 +; RV32IZFINXZDINX-NEXT: csrw fflags, a0 +; RV32IZFINXZDINX-NEXT: xori a0, a1, 1 +; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_ult: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: csrr a2, fflags @@ -580,6 +791,25 @@ ; CHECKIFD-NEXT: feq.d zero, fa1, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_ule: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: csrr a0, fflags +; RV32IZFINXZDINX-NEXT: flt.d a1, a2, a4 +; RV32IZFINXZDINX-NEXT: csrw fflags, a0 +; RV32IZFINXZDINX-NEXT: xori a0, a1, 1 +; RV32IZFINXZDINX-NEXT: feq.d zero, a2, a4 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_ule: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: csrr a2, fflags @@ -621,6 +851,22 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_une: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a2 +; 
RV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_une: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a1 @@ -660,6 +906,24 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmp_uno: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: feq.d a2, a2, a2 +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmp_uno: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: feq.d a1, a1, a1 @@ -700,6 +964,23 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_oeq: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0 +; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: and a0, a0, a4 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_oeq: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a2, a1, a0 @@ -738,6 +1019,21 @@ ; CHECKIFD-NEXT: flt.d a0, fa1, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_ogt: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_ogt: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 @@ -773,6 +1069,21 @@ ; CHECKIFD-NEXT: fle.d a0, fa1, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_oge: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_oge: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a0, a1, a0 @@ -810,6 +1121,21 @@ ; CHECKIFD-NEXT: flt.d a0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_olt: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; 
RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_olt: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a0, a0, a1 @@ -845,6 +1171,21 @@ ; CHECKIFD-NEXT: fle.d a0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_ole: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_ole: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1 @@ -882,6 +1223,23 @@ ; CHECKIFD-NEXT: or a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_one: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2 +; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: or a0, a0, a4 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_one: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a2, a0, a1 @@ -955,6 +1313,23 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_ord: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fle.d a2, a2, a2 +; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_ord: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a1, a1, a1 @@ -995,6 +1370,24 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_ueq: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: flt.d a4, a0, a2 +; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: or a0, a0, a4 +; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_ueq: ; RV64IZFINXZDINX: # %bb.0: ; 
RV64IZFINXZDINX-NEXT: flt.d a2, a0, a1 @@ -1068,6 +1461,22 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_ugt: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_ugt: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a0, a0, a1 @@ -1105,6 +1514,22 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_uge: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: flt.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_uge: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a0, a0, a1 @@ -1144,6 +1569,22 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_ult: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fle.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_ult: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a0, a1, a0 @@ -1181,6 +1622,22 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_ule: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_ule: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 @@ -1220,6 +1677,24 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_une: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; 
RV32IZFINXZDINX-NEXT: fle.d a4, a2, a0 +; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: and a0, a0, a4 +; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_une: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a2, a1, a0 @@ -1261,6 +1736,24 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fcmps_uno: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fle.d a2, a2, a2 +; RV32IZFINXZDINX-NEXT: fle.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fcmps_uno: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fle.d a1, a1, a1 diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll --- a/llvm/test/CodeGen/RISCV/double-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll @@ -3,8 +3,10 @@ ; RUN: -target-abi=ilp32d | FileCheck -check-prefix=CHECKIFD %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64d | FileCheck -check-prefix=CHECKIFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=CHECKIZFINXZDINX,CHECKRV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ -; RUN: -target-abi=lp64 | FileCheck -check-prefix=CHECKIZFINXZDINX %s +; RUN: -target-abi=lp64 | FileCheck -check-prefixes=CHECKIZFINXZDINX,CHECKRV64IZFINXZDINX %s ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32I %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ @@ -41,10 +43,25 @@ ; CHECKIFD-NEXT: feq.d a0, fa0, fa1 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_oeq: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: feq.d a0, a0, a1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_oeq: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a2 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_oeq: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: feq.d a0, a0, a1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_oeq: ; RV32I: # %bb.0: @@ -76,10 +93,25 @@ ; CHECKIFD-NEXT: flt.d a0, fa1, fa0 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_ogt: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: flt.d a0, a1, a0 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_ogt: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; 
CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_ogt: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_ogt: ; RV32I: # %bb.0: @@ -111,10 +143,25 @@ ; CHECKIFD-NEXT: fle.d a0, fa1, fa0 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_oge: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: fle.d a0, a1, a0 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_oge: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a2, a0 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_oge: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: fle.d a0, a1, a0 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_oge: ; RV32I: # %bb.0: @@ -148,10 +195,25 @@ ; CHECKIFD-NEXT: flt.d a0, fa0, fa1 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_olt: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: flt.d a0, a0, a1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_olt: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a0, a2 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_olt: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: flt.d a0, a0, a1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_olt: ; RV32I: # %bb.0: @@ -183,10 +245,25 @@ ; CHECKIFD-NEXT: fle.d a0, fa0, fa1 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_ole: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: fle.d a0, a0, a1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_ole: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_ole: +; 
CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: fle.d a0, a0, a1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_ole: ; RV32I: # %bb.0: @@ -220,12 +297,29 @@ ; CHECKIFD-NEXT: or a0, a1, a0 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_one: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: flt.d a2, a0, a1 -; CHECKIZFINXZDINX-NEXT: flt.d a0, a1, a0 -; CHECKIZFINXZDINX-NEXT: or a0, a0, a2 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_one: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: flt.d a4, a0, a2 +; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; CHECKRV32IZFINXZDINX-NEXT: or a0, a0, a4 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_one: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: flt.d a2, a0, a1 +; CHECKRV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 +; CHECKRV64IZFINXZDINX-NEXT: or a0, a0, a2 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_one: ; RV32I: # %bb.0: @@ -293,12 +387,29 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_ord: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: feq.d a1, a1, a1 -; CHECKIZFINXZDINX-NEXT: feq.d a0, a0, a0 -; CHECKIZFINXZDINX-NEXT: and a0, a0, a1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_ord: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: feq.d a2, a2, a2 +; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; CHECKRV32IZFINXZDINX-NEXT: and a0, a0, a2 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_ord: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: feq.d a1, a1, a1 +; CHECKRV64IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; CHECKRV64IZFINXZDINX-NEXT: and a0, a0, a1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_ord: ; RV32I: # %bb.0: @@ -333,13 +444,31 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_ueq: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: flt.d a2, a0, a1 -; CHECKIZFINXZDINX-NEXT: flt.d a0, a1, a0 -; CHECKIZFINXZDINX-NEXT: or a0, a0, a2 -; CHECKIZFINXZDINX-NEXT: xori a0, a0, 1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_ueq: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; 
CHECKRV32IZFINXZDINX-NEXT: flt.d a4, a0, a2 +; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; CHECKRV32IZFINXZDINX-NEXT: or a0, a0, a4 +; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_ueq: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: flt.d a2, a0, a1 +; CHECKRV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 +; CHECKRV64IZFINXZDINX-NEXT: or a0, a0, a2 +; CHECKRV64IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_ueq: ; RV32I: # %bb.0: @@ -406,11 +535,27 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_ugt: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: fle.d a0, a0, a1 -; CHECKIZFINXZDINX-NEXT: xori a0, a0, 1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_ugt: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a0, a2 +; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_ugt: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: fle.d a0, a0, a1 +; CHECKRV64IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_ugt: ; RV32I: # %bb.0: @@ -443,11 +588,27 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_uge: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: flt.d a0, a0, a1 -; CHECKIZFINXZDINX-NEXT: xori a0, a0, 1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_uge: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a0, a2 +; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_uge: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: flt.d a0, a0, a1 +; CHECKRV64IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_uge: ; RV32I: # %bb.0: @@ -482,11 +643,27 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_ult: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: fle.d a0, a1, a0 -; CHECKIZFINXZDINX-NEXT: xori a0, a0, 1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_ult: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; 
CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: fle.d a0, a2, a0 +; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_ult: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: fle.d a0, a1, a0 +; CHECKRV64IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_ult: ; RV32I: # %bb.0: @@ -519,11 +696,27 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_ule: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: flt.d a0, a1, a0 -; CHECKIZFINXZDINX-NEXT: xori a0, a0, 1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_ule: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: flt.d a0, a2, a0 +; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_ule: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: flt.d a0, a1, a0 +; CHECKRV64IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_ule: ; RV32I: # %bb.0: @@ -556,11 +749,27 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_une: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: feq.d a0, a0, a1 -; CHECKIZFINXZDINX-NEXT: xori a0, a0, 1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_une: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a2 +; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_une: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: feq.d a0, a0, a1 +; CHECKRV64IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_une: ; RV32I: # %bb.0: @@ -595,13 +804,31 @@ ; CHECKIFD-NEXT: xori a0, a0, 1 ; CHECKIFD-NEXT: ret ; -; CHECKIZFINXZDINX-LABEL: fcmp_uno: -; CHECKIZFINXZDINX: # %bb.0: -; CHECKIZFINXZDINX-NEXT: feq.d a1, a1, a1 -; CHECKIZFINXZDINX-NEXT: feq.d a0, a0, a0 -; CHECKIZFINXZDINX-NEXT: and a0, a0, a1 -; CHECKIZFINXZDINX-NEXT: xori a0, a0, 1 -; CHECKIZFINXZDINX-NEXT: ret +; CHECKRV32IZFINXZDINX-LABEL: fcmp_uno: +; CHECKRV32IZFINXZDINX: # %bb.0: +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; 
CHECKRV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32IZFINXZDINX-NEXT: feq.d a2, a2, a2 +; CHECKRV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; CHECKRV32IZFINXZDINX-NEXT: and a0, a0, a2 +; CHECKRV32IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32IZFINXZDINX-NEXT: ret +; +; CHECKRV64IZFINXZDINX-LABEL: fcmp_uno: +; CHECKRV64IZFINXZDINX: # %bb.0: +; CHECKRV64IZFINXZDINX-NEXT: feq.d a1, a1, a1 +; CHECKRV64IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; CHECKRV64IZFINXZDINX-NEXT: and a0, a0, a1 +; CHECKRV64IZFINXZDINX-NEXT: xori a0, a0, 1 +; CHECKRV64IZFINXZDINX-NEXT: ret ; ; RV32I-LABEL: fcmp_uno: ; RV32I: # %bb.0: diff --git a/llvm/test/CodeGen/RISCV/double-frem.ll b/llvm/test/CodeGen/RISCV/double-frem.ll --- a/llvm/test/CodeGen/RISCV/double-frem.ll +++ b/llvm/test/CodeGen/RISCV/double-frem.ll @@ -3,6 +3,8 @@ ; RUN: | FileCheck -check-prefix=RV32IFD %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s @@ -15,6 +17,15 @@ ; RV64IFD: # %bb.0: ; RV64IFD-NEXT: tail fmod@plt ; +; RV32IZFINXZDINX-LABEL: frem_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call fmod@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: frem_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: tail fmod@plt diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll --- a/llvm/test/CodeGen/RISCV/double-imm.ll +++ b/llvm/test/CodeGen/RISCV/double-imm.ll @@ -3,6 +3,8 @@ ; RUN: -target-abi=ilp32d | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64d | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck --check-prefix=CHECKRV32ZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64 | FileCheck --check-prefix=CHECKRV64ZDINX %s @@ -13,6 +15,14 @@ ; CHECK-NEXT: fld fa0, %lo(.LCPI0_0)(a0) ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: double_imm: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: lui a0, 345155 +; CHECKRV32ZDINX-NEXT: addi a0, a0, -744 +; CHECKRV32ZDINX-NEXT: lui a1, 262290 +; CHECKRV32ZDINX-NEXT: addi a1, a1, 507 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: double_imm: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: lui a0, %hi(.LCPI0_0) @@ -29,6 +39,24 @@ ; CHECK-NEXT: fadd.d fa0, fa0, fa5 ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: double_imm_op: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lui a2, %hi(.LCPI1_0) +; CHECKRV32ZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2) +; CHECKRV32ZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; CHECKRV32ZDINX-NEXT: fadd.d a0, a0, a2 +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; 
CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: double_imm_op: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: lui a1, %hi(.LCPI1_0) diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll --- a/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics-strict.ll @@ -5,6 +5,9 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \ ; RUN: -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64d \ ; RUN: | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zdinx \ +; RUN: -verify-machineinstrs -disable-strictnode-mutation -target-abi=ilp32 \ +; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \ ; RUN: -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64 \ ; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s @@ -23,6 +26,21 @@ ; CHECKIFD-NEXT: fsqrt.d fa0, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: sqrt_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: sqrt_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fsqrt.d a0, a0 @@ -71,6 +89,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: powi_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call __powidf2@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: powi_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -124,6 +151,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: sin_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call sin@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: sin_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -175,6 +211,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: cos_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call cos@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: cos_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -243,6 +288,41 @@ ; RV64IFD-NEXT: addi sp, sp, 32 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: sincos_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; 
RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: mv s0, a1 +; RV32IZFINXZDINX-NEXT: mv s1, a0 +; RV32IZFINXZDINX-NEXT: call sin@plt +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw s2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw s3, 4(sp) +; RV32IZFINXZDINX-NEXT: mv a0, s1 +; RV32IZFINXZDINX-NEXT: mv a1, s0 +; RV32IZFINXZDINX-NEXT: call cos@plt +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, s2, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: sincos_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -32 @@ -336,6 +416,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: pow_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call pow@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: pow_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -387,6 +476,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: exp_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call exp@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: exp_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -438,6 +536,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: exp2_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call exp2@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: exp2_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -489,6 +596,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: log_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call log@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: log_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -540,6 +656,15 @@ ; 
RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: log10_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call log10@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: log10_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -591,6 +716,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: log2_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call log2@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: log2_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -629,6 +763,29 @@ ; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fma_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fma_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2 @@ -663,6 +820,29 @@ ; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmuladd_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmuladd_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2 @@ -724,6 +904,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: minnum_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call fmin@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: 
minnum_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -775,6 +964,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: maxnum_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call fmax@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: maxnum_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -843,6 +1041,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: floor_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call floor@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: floor_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -894,6 +1101,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: ceil_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call ceil@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: ceil_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -945,6 +1161,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: trunc_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call trunc@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: trunc_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -996,6 +1221,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: rint_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call rint@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: rint_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -1047,6 +1281,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: nearbyint_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call nearbyint@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: nearbyint_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -1098,6 +1341,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: round_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call round@plt +; RV32IZFINXZDINX-NEXT: lw 
ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: round_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -1149,6 +1401,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: roundeven_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call roundeven@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: roundeven_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -1192,6 +1453,17 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: lrint_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: lrint_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0 @@ -1231,6 +1503,17 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: lround_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: lround_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rmm @@ -1274,6 +1557,15 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: llrint_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call llrint@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: llrint_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0 @@ -1317,6 +1609,15 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: llround_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call llround@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: llround_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rmm diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll @@ -5,6 +5,9 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \ ; RUN: -verify-machineinstrs -target-abi=lp64d \ ; RUN: | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zdinx \ +; RUN: -verify-machineinstrs -target-abi=ilp32 \ +; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s ; RUN: sed 's/iXLen/i64/g' %s | llc 
-mtriple=riscv64 -mattr=+zdinx \ ; RUN: -verify-machineinstrs -target-abi=lp64 \ ; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s @@ -21,6 +24,21 @@ ; CHECKIFD-NEXT: fsqrt.d fa0, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: sqrt_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fsqrt.d a0, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: sqrt_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fsqrt.d a0, a0 @@ -64,6 +82,15 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: powi_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call __powidf2@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: powi_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -103,6 +130,15 @@ ; CHECKIFD: # %bb.0: ; CHECKIFD-NEXT: tail sin@plt ; +; RV32IZFINXZDINX-LABEL: sin_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call sin@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: sin_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: tail sin@plt @@ -135,6 +171,15 @@ ; CHECKIFD: # %bb.0: ; CHECKIFD-NEXT: tail cos@plt ; +; RV32IZFINXZDINX-LABEL: cos_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call cos@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: cos_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: tail cos@plt @@ -198,6 +243,41 @@ ; RV64IFD-NEXT: addi sp, sp, 32 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: sincos_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: mv s0, a1 +; RV32IZFINXZDINX-NEXT: mv s1, a0 +; RV32IZFINXZDINX-NEXT: call sin@plt +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw s2, 0(sp) +; RV32IZFINXZDINX-NEXT: lw s3, 4(sp) +; RV32IZFINXZDINX-NEXT: mv a0, s1 +; RV32IZFINXZDINX-NEXT: mv a1, s0 +; RV32IZFINXZDINX-NEXT: call cos@plt +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, s2, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) 
+; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: sincos_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -32 @@ -277,6 +357,15 @@ ; CHECKIFD: # %bb.0: ; CHECKIFD-NEXT: tail pow@plt ; +; RV32IZFINXZDINX-LABEL: pow_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call pow@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: pow_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: tail pow@plt @@ -309,6 +398,15 @@ ; CHECKIFD: # %bb.0: ; CHECKIFD-NEXT: tail exp@plt ; +; RV32IZFINXZDINX-LABEL: exp_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call exp@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: exp_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: tail exp@plt @@ -341,6 +439,15 @@ ; CHECKIFD: # %bb.0: ; CHECKIFD-NEXT: tail exp2@plt ; +; RV32IZFINXZDINX-LABEL: exp2_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call exp2@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: exp2_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: tail exp2@plt @@ -373,6 +480,15 @@ ; CHECKIFD: # %bb.0: ; CHECKIFD-NEXT: tail log@plt ; +; RV32IZFINXZDINX-LABEL: log_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call log@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: log_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: tail log@plt @@ -405,6 +521,15 @@ ; CHECKIFD: # %bb.0: ; CHECKIFD-NEXT: tail log10@plt ; +; RV32IZFINXZDINX-LABEL: log10_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call log10@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: log10_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: tail log10@plt @@ -437,6 +562,15 @@ ; CHECKIFD: # %bb.0: ; CHECKIFD-NEXT: tail log2@plt ; +; RV32IZFINXZDINX-LABEL: log2_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call log2@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: 
log2_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: tail log2@plt @@ -470,6 +604,29 @@ ; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fma_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fma_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2 @@ -504,6 +661,29 @@ ; CHECKIFD-NEXT: fmadd.d fa0, fa0, fa1, fa2 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fmuladd_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmadd.d a0, a0, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fmuladd_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmadd.d a0, a0, a1, a2 @@ -552,6 +732,12 @@ ; CHECKIFD-NEXT: fabs.d fa0, fa0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fabs_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: slli a1, a1, 1 +; RV32IZFINXZDINX-NEXT: srli a1, a1, 1 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fabs_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: slli a0, a0, 1 @@ -581,6 +767,25 @@ ; CHECKIFD-NEXT: fmin.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: minnum_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmin.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: minnum_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmin.d a0, a0, a1 @@ -615,6 +820,25 @@ ; CHECKIFD-NEXT: fmax.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: maxnum_f64: +; RV32IZFINXZDINX: # %bb.0: +; 
RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fmax.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: maxnum_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fmax.d a0, a0, a1 @@ -666,6 +890,25 @@ ; CHECKIFD-NEXT: fsgnj.d fa0, fa0, fa1 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: copysign_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fsgnj.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: copysign_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fsgnj.d a0, a0, a1 @@ -713,6 +956,15 @@ ; RV64IFD-NEXT: .LBB17_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: floor_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call floor@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: floor_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI17_0) @@ -769,6 +1021,15 @@ ; RV64IFD-NEXT: .LBB18_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: ceil_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call ceil@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: ceil_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI18_0) @@ -825,6 +1086,15 @@ ; RV64IFD-NEXT: .LBB19_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: trunc_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call trunc@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: trunc_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI19_0) @@ -881,6 +1151,15 @@ ; RV64IFD-NEXT: .LBB20_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: rint_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call rint@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 
16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: rint_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI20_0) @@ -923,6 +1202,15 @@ ; CHECKIFD: # %bb.0: ; CHECKIFD-NEXT: tail nearbyint@plt ; +; RV32IZFINXZDINX-LABEL: nearbyint_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call nearbyint@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: nearbyint_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: tail nearbyint@plt @@ -969,6 +1257,15 @@ ; RV64IFD-NEXT: .LBB22_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: round_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call round@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: round_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI22_0) @@ -1025,6 +1322,15 @@ ; RV64IFD-NEXT: .LBB23_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: roundeven_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call roundeven@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: roundeven_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI23_0) @@ -1073,6 +1379,17 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: lrint_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: lrint_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0 @@ -1113,6 +1430,17 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: lround_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: lround_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rmm @@ -1145,6 +1473,17 @@ ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rmm ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: lround_i32_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: lround_i32_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm @@ -1188,6 +1527,15 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0 ; RV64IFD-NEXT: ret ; +; 
RV32IZFINXZDINX-LABEL: llrint_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call llrint@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: llrint_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0 @@ -1231,6 +1579,15 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: llround_f64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call llround@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: llround_f64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rmm @@ -1266,6 +1623,20 @@ ; CHECKIFD-NEXT: snez a0, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: isnan_d_fpclass: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fclass.d a0, a0 +; RV32IZFINXZDINX-NEXT: andi a0, a0, 768 +; RV32IZFINXZDINX-NEXT: snez a0, a0 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: isnan_d_fpclass: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fclass.d a0, a0 diff --git a/llvm/test/CodeGen/RISCV/double-isnan.ll b/llvm/test/CodeGen/RISCV/double-isnan.ll --- a/llvm/test/CodeGen/RISCV/double-isnan.ll +++ b/llvm/test/CodeGen/RISCV/double-isnan.ll @@ -3,6 +3,8 @@ ; RUN: < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi lp64d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -target-abi ilp32 -verify-machineinstrs \ +; RUN: < %s | FileCheck --check-prefix=CHECKRV32ZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -target-abi lp64 -verify-machineinstrs \ ; RUN: < %s | FileCheck --check-prefix=CHECKRV64ZDINX %s @@ -13,6 +15,18 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: double_is_nan: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: feq.d a0, a0, a0 +; CHECKRV32ZDINX-NEXT: xori a0, a0, 1 +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: double_is_nan: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: feq.d a0, a0, a0 @@ -28,6 +42,17 @@ ; CHECK-NEXT: feq.d a0, fa0, fa0 ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: double_not_nan: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: feq.d a0, a0, a0 +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: double_not_nan: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: feq.d a0, a0, a0 diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll --- 
a/llvm/test/CodeGen/RISCV/double-mem.ll +++ b/llvm/test/CodeGen/RISCV/double-mem.ll @@ -3,6 +3,8 @@ ; RUN: -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s @@ -14,6 +16,21 @@ ; CHECKIFD-NEXT: fadd.d fa0, fa5, fa4 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fld: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: lw a2, 0(a0) +; RV32IZFINXZDINX-NEXT: lw a3, 4(a0) +; RV32IZFINXZDINX-NEXT: lw a1, 28(a0) +; RV32IZFINXZDINX-NEXT: lw a0, 24(a0) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a2, a0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fld: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: ld a1, 0(a0) @@ -37,6 +54,25 @@ ; CHECKIFD-NEXT: fsd fa5, 64(a0) ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fsd: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a3, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a4, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a4, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a5, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a2, a2, a4 +; RV32IZFINXZDINX-NEXT: sw a2, 0(a0) +; RV32IZFINXZDINX-NEXT: sw a3, 4(a0) +; RV32IZFINXZDINX-NEXT: sw a2, 64(a0) +; RV32IZFINXZDINX-NEXT: sw a3, 68(a0) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fsd: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a1, a1, a2 @@ -67,6 +103,35 @@ ; CHECKIFD-NEXT: fsd fa0, 72(a1) ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fld_fsd_global: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: lui a2, %hi(G) +; RV32IZFINXZDINX-NEXT: lw a4, %lo(G)(a2) +; RV32IZFINXZDINX-NEXT: lw a5, %lo(G+4)(a2) +; RV32IZFINXZDINX-NEXT: addi a3, a2, %lo(G) +; RV32IZFINXZDINX-NEXT: sw a0, %lo(G)(a2) +; RV32IZFINXZDINX-NEXT: sw a1, %lo(G+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a4, 72(a3) +; RV32IZFINXZDINX-NEXT: lw a5, 76(a3) +; RV32IZFINXZDINX-NEXT: sw a0, 72(a3) +; RV32IZFINXZDINX-NEXT: sw a1, 76(a3) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fld_fsd_global: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a1 @@ -107,6 +172,26 @@ ; RV64IFD-NEXT: fsd fa0, -273(a0) ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: 
fld_fsd_constant: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lui a2, 912092 +; RV32IZFINXZDINX-NEXT: lw a4, -273(a2) +; RV32IZFINXZDINX-NEXT: lw a5, -269(a2) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a4 +; RV32IZFINXZDINX-NEXT: sw a0, -273(a2) +; RV32IZFINXZDINX-NEXT: sw a1, -269(a2) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fld_fsd_constant: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, 228023 @@ -155,6 +240,31 @@ ; RV64IFD-NEXT: addi sp, sp, 32 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fld_stack: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw s0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw s1, 4(sp) +; RV32IZFINXZDINX-NEXT: addi a0, sp, 8 +; RV32IZFINXZDINX-NEXT: call notdead@plt +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, s0 +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fld_stack: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -32 @@ -201,6 +311,27 @@ ; RV64IFD-NEXT: addi sp, sp, 16 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fsd_stack: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, a2 +; RV32IZFINXZDINX-NEXT: sw a0, 16(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 20(sp) +; RV32IZFINXZDINX-NEXT: addi a0, sp, 16 +; RV32IZFINXZDINX-NEXT: call notdead@plt +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: fsd_stack: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 @@ -227,6 +358,18 @@ ; CHECKIFD-NEXT: fsw fa5, 0(a0) ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: fsd_trunc: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw a1, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a2, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a3, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.s.d a1, a2 +; RV32IZFINXZDINX-NEXT: sw a1, 0(a0) +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; 
RV64IZFINXZDINX-LABEL: fsd_trunc: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.s.d a1, a1 diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll --- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll +++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll @@ -1,11 +1,17 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi=ilp32 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -target-abi=ilp32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s define double @test(double %a) nounwind { ; RV32IFD-LABEL: test: ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: ret +; +; RV32IZFINXZDINX-LABEL: test: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: ret ret double %a } @@ -35,6 +41,32 @@ ; RV32IFD-NEXT: call abort@plt ; RV32IFD-NEXT: .LBB1_2: # %if.end ; RV32IFD-NEXT: call exit@plt +; +; RV32IZFINXZDINX-LABEL: main: +; RV32IZFINXZDINX: # %bb.0: # %entry +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: lui a1, 262144 +; RV32IZFINXZDINX-NEXT: li a0, 0 +; RV32IZFINXZDINX-NEXT: call test@plt +; RV32IZFINXZDINX-NEXT: sw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 0(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 4(sp) +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; RV32IZFINXZDINX-NEXT: lui a4, %hi(.LCPI1_1) +; RV32IZFINXZDINX-NEXT: lw a5, %lo(.LCPI1_1+4)(a4) +; RV32IZFINXZDINX-NEXT: lw a4, %lo(.LCPI1_1)(a4) +; RV32IZFINXZDINX-NEXT: flt.d a2, a0, a2 +; RV32IZFINXZDINX-NEXT: flt.d a0, a4, a0 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: beqz a0, .LBB1_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.then +; RV32IZFINXZDINX-NEXT: call abort@plt +; RV32IZFINXZDINX-NEXT: .LBB1_2: # %if.end +; RV32IZFINXZDINX-NEXT: call exit@plt entry: %call = call double @test(double 2.000000e+00) %cmp = fcmp olt double %call, 2.400000e-01 diff --git a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll --- a/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/double-round-conv-sat.ll @@ -3,6 +3,8 @@ ; RUN: -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s @@ -16,6 +18,22 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_si32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rdn +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; 
RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_si32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rdn @@ -78,6 +96,52 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_si64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call floor@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI1_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI1_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2 +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a4, 524288 +; RV32IZFINXZDINX-NEXT: lui a2, 524288 +; RV32IZFINXZDINX-NEXT: beqz s0, .LBB1_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: +; RV32IZFINXZDINX-NEXT: mv a2, a1 +; RV32IZFINXZDINX-NEXT: .LBB1_2: +; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI1_1) +; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI1_1)(a1) +; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI1_1+4)(a1) +; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2 +; RV32IZFINXZDINX-NEXT: beqz a3, .LBB1_4 +; RV32IZFINXZDINX-NEXT: # %bb.3: +; RV32IZFINXZDINX-NEXT: addi a2, a4, -1 +; RV32IZFINXZDINX-NEXT: .LBB1_4: +; RV32IZFINXZDINX-NEXT: feq.d a1, s2, s2 +; RV32IZFINXZDINX-NEXT: neg a4, a1 +; RV32IZFINXZDINX-NEXT: and a1, a4, a2 +; RV32IZFINXZDINX-NEXT: neg a2, s0 +; RV32IZFINXZDINX-NEXT: and a0, a2, a0 +; RV32IZFINXZDINX-NEXT: neg a2, a3 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a0, a4, a0 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_si64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rdn @@ -101,6 +165,22 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_ui32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rdn +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_ui32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rdn @@ -149,6 +229,38 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_ui64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) 
# 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call floor@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero +; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI3_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI3_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI3_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_ui64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rdn @@ -172,6 +284,22 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_si32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rup +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_si32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rup @@ -234,6 +362,52 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_si64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call ceil@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI5_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI5_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI5_0)(a2) +; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2 +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a4, 524288 +; RV32IZFINXZDINX-NEXT: lui a2, 524288 +; RV32IZFINXZDINX-NEXT: beqz s0, .LBB5_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: +; RV32IZFINXZDINX-NEXT: mv a2, a1 +; RV32IZFINXZDINX-NEXT: .LBB5_2: +; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI5_1) +; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI5_1)(a1) +; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI5_1+4)(a1) +; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2 +; RV32IZFINXZDINX-NEXT: beqz a3, .LBB5_4 +; RV32IZFINXZDINX-NEXT: # %bb.3: +; RV32IZFINXZDINX-NEXT: addi a2, a4, -1 +; RV32IZFINXZDINX-NEXT: .LBB5_4: +; RV32IZFINXZDINX-NEXT: feq.d a1, s2, s2 +; RV32IZFINXZDINX-NEXT: neg a4, a1 +; RV32IZFINXZDINX-NEXT: and a1, a4, a2 +; 
RV32IZFINXZDINX-NEXT: neg a2, s0 +; RV32IZFINXZDINX-NEXT: and a0, a2, a0 +; RV32IZFINXZDINX-NEXT: neg a2, a3 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a0, a4, a0 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_si64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rup @@ -257,6 +431,22 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_ui32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rup +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_ui32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rup @@ -305,6 +495,38 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_ui64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call ceil@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero +; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI7_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI7_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI7_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_ui64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rup @@ -328,6 +550,22 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_si32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rtz +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; 
RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_si32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rtz @@ -390,6 +628,52 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_si64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call trunc@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI9_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI9_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI9_0)(a2) +; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2 +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a4, 524288 +; RV32IZFINXZDINX-NEXT: lui a2, 524288 +; RV32IZFINXZDINX-NEXT: beqz s0, .LBB9_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: +; RV32IZFINXZDINX-NEXT: mv a2, a1 +; RV32IZFINXZDINX-NEXT: .LBB9_2: +; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI9_1) +; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI9_1)(a1) +; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI9_1+4)(a1) +; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2 +; RV32IZFINXZDINX-NEXT: beqz a3, .LBB9_4 +; RV32IZFINXZDINX-NEXT: # %bb.3: +; RV32IZFINXZDINX-NEXT: addi a2, a4, -1 +; RV32IZFINXZDINX-NEXT: .LBB9_4: +; RV32IZFINXZDINX-NEXT: feq.d a1, s2, s2 +; RV32IZFINXZDINX-NEXT: neg a4, a1 +; RV32IZFINXZDINX-NEXT: and a1, a4, a2 +; RV32IZFINXZDINX-NEXT: neg a2, s0 +; RV32IZFINXZDINX-NEXT: and a0, a2, a0 +; RV32IZFINXZDINX-NEXT: neg a2, a3 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a0, a4, a0 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_si64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rtz @@ -413,6 +697,22 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_ui32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rtz +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_ui32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rtz @@ -461,6 +761,38 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_ui64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; 
RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call trunc@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero +; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI11_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI11_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI11_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_ui64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rtz @@ -484,6 +816,22 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_si32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a2, a0, rmm +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_si32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rmm @@ -546,6 +894,52 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_si64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call round@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI13_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI13_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI13_0)(a2) +; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2 +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a4, 524288 +; RV32IZFINXZDINX-NEXT: lui a2, 524288 +; RV32IZFINXZDINX-NEXT: beqz s0, .LBB13_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: +; RV32IZFINXZDINX-NEXT: mv a2, a1 +; RV32IZFINXZDINX-NEXT: .LBB13_2: +; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI13_1) +; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI13_1)(a1) +; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI13_1+4)(a1) +; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2 +; RV32IZFINXZDINX-NEXT: beqz a3, .LBB13_4 +; RV32IZFINXZDINX-NEXT: # %bb.3: +; RV32IZFINXZDINX-NEXT: addi a2, a4, -1 +; RV32IZFINXZDINX-NEXT: .LBB13_4: +; RV32IZFINXZDINX-NEXT: 
feq.d a1, s2, s2 +; RV32IZFINXZDINX-NEXT: neg a4, a1 +; RV32IZFINXZDINX-NEXT: and a1, a4, a2 +; RV32IZFINXZDINX-NEXT: neg a2, s0 +; RV32IZFINXZDINX-NEXT: and a0, a2, a0 +; RV32IZFINXZDINX-NEXT: neg a2, a3 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a0, a4, a0 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_si64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rmm @@ -569,6 +963,22 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_ui32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rmm +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_ui32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rmm @@ -617,6 +1027,38 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_ui64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call round@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero +; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI15_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI15_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI15_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_ui64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rmm @@ -640,6 +1082,22 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_si32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: 
fcvt.w.d a2, a0, rne +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_si32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a1, a0, rne @@ -702,6 +1160,52 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_si64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s3, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call roundeven@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s2, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s3, 12(sp) +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI17_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI17_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI17_0)(a2) +; RV32IZFINXZDINX-NEXT: fle.d s0, a2, s2 +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a4, 524288 +; RV32IZFINXZDINX-NEXT: lui a2, 524288 +; RV32IZFINXZDINX-NEXT: beqz s0, .LBB17_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: +; RV32IZFINXZDINX-NEXT: mv a2, a1 +; RV32IZFINXZDINX-NEXT: .LBB17_2: +; RV32IZFINXZDINX-NEXT: lui a1, %hi(.LCPI17_1) +; RV32IZFINXZDINX-NEXT: lw a6, %lo(.LCPI17_1)(a1) +; RV32IZFINXZDINX-NEXT: lw a7, %lo(.LCPI17_1+4)(a1) +; RV32IZFINXZDINX-NEXT: flt.d a3, a6, s2 +; RV32IZFINXZDINX-NEXT: beqz a3, .LBB17_4 +; RV32IZFINXZDINX-NEXT: # %bb.3: +; RV32IZFINXZDINX-NEXT: addi a2, a4, -1 +; RV32IZFINXZDINX-NEXT: .LBB17_4: +; RV32IZFINXZDINX-NEXT: feq.d a1, s2, s2 +; RV32IZFINXZDINX-NEXT: neg a4, a1 +; RV32IZFINXZDINX-NEXT: and a1, a4, a2 +; RV32IZFINXZDINX-NEXT: neg a2, s0 +; RV32IZFINXZDINX-NEXT: and a0, a2, a0 +; RV32IZFINXZDINX-NEXT: neg a2, a3 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a0, a4, a0 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s3, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_si64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a0, rne @@ -725,6 +1229,22 @@ ; CHECKIFD-NEXT: and a0, a1, a0 ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_ui32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a2, a0, rne +; RV32IZFINXZDINX-NEXT: feq.d a0, a0, a0 +; RV32IZFINXZDINX-NEXT: seqz a0, a0 +; RV32IZFINXZDINX-NEXT: addi a0, a0, -1 +; RV32IZFINXZDINX-NEXT: and a0, a0, a2 +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_ui32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a1, a0, rne @@ -773,6 +1293,38 @@ ; RV64IFD-NEXT: and a0, a1, a0 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_ui64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, 
-32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: call roundeven@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.d.w a2, zero +; RV32IZFINXZDINX-NEXT: fle.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg s2, a2 +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lui a2, %hi(.LCPI19_0) +; RV32IZFINXZDINX-NEXT: lw a3, %lo(.LCPI19_0+4)(a2) +; RV32IZFINXZDINX-NEXT: lw a2, %lo(.LCPI19_0)(a2) +; RV32IZFINXZDINX-NEXT: and a0, s2, a0 +; RV32IZFINXZDINX-NEXT: flt.d a2, a2, s0 +; RV32IZFINXZDINX-NEXT: neg a2, a2 +; RV32IZFINXZDINX-NEXT: or a0, a2, a0 +; RV32IZFINXZDINX-NEXT: and a1, s2, a1 +; RV32IZFINXZDINX-NEXT: or a1, a2, a1 +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_ui64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a1, a0, rne diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll --- a/llvm/test/CodeGen/RISCV/double-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll @@ -3,6 +3,8 @@ ; RUN: -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s @@ -17,6 +19,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rdn ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_si8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rdn +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_si8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rdn @@ -37,6 +51,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rdn ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_si16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rdn +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_si16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rdn @@ -52,6 +78,18 @@ ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rdn ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_si32: +; 
RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rdn +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_si32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rdn @@ -79,6 +117,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rdn ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_si64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call floor@plt +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_si64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rdn @@ -99,6 +149,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rdn ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_ui8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rdn +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_ui8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rdn @@ -119,6 +181,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rdn ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_ui16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rdn +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_ui16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rdn @@ -134,6 +208,18 @@ ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rdn ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_ui32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rdn +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_ui32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rdn @@ -161,6 +247,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rdn ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_ui64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call floor@plt +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 
12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_ui64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rdn @@ -181,6 +279,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rup ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_si8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rup +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_si8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rup @@ -201,6 +311,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rup ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_si16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rup +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_si16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rup @@ -216,6 +338,18 @@ ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rup ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_si32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rup +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_si32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rup @@ -243,6 +377,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rup ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_si64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call ceil@plt +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_si64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rup @@ -263,6 +409,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rup ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_ui8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rup +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_ui8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rup @@ -283,6 +441,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rup ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_ui16: +; 
RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rup +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_ui16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rup @@ -298,6 +468,18 @@ ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rup ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_ui32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rup +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_ui32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rup @@ -325,6 +507,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rup ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_ui64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call ceil@plt +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_ui64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rup @@ -345,6 +539,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_si8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_si8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz @@ -365,6 +571,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_si16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_si16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz @@ -380,6 +598,18 @@ ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rtz ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_si32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: 
addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_si32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rtz @@ -407,6 +637,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_si64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call trunc@plt +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_si64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz @@ -427,6 +669,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_ui8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_ui8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz @@ -447,6 +701,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_ui16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_ui16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz @@ -462,6 +728,18 @@ ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_ui32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_ui32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rtz @@ -489,6 +767,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_ui64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call trunc@plt +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_ui64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz @@ -509,6 +799,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: 
test_round_si8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_si8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rmm @@ -529,6 +831,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_si16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_si16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rmm @@ -544,6 +858,18 @@ ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rmm ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_si32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_si32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rmm @@ -571,6 +897,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rmm ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_si64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call round@plt +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_si64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rmm @@ -591,6 +929,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rmm ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_ui8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rmm +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_ui8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rmm @@ -611,6 +961,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rmm ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_ui16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rmm +; 
RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_ui16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rmm @@ -626,6 +988,18 @@ ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rmm ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_ui32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rmm +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_ui32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rmm @@ -653,6 +1027,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rmm ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_ui64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call round@plt +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_ui64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rmm @@ -673,6 +1059,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rne ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_si8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rne +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_si8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rne @@ -693,6 +1091,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rne ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_si16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rne +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_si16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rne @@ -708,6 +1118,18 @@ ; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rne ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_si32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rne +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_si32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.w.d a0, a0, rne @@ -735,6 +1157,18 @@ ; RV64IFD-NEXT: fcvt.l.d a0, fa0, rne ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: 
test_roundeven_si64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call roundeven@plt +; RV32IZFINXZDINX-NEXT: call __fixdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_si64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rne @@ -755,6 +1189,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rne ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_ui8: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rne +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_ui8: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rne @@ -775,6 +1221,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rne ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_ui16: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rne +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_ui16: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rne @@ -790,6 +1248,18 @@ ; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rne ; CHECKIFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_ui32: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rne +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_ui32: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.wu.d a0, a0, rne @@ -817,6 +1287,18 @@ ; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rne ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_ui64: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call roundeven@plt +; RV32IZFINXZDINX-NEXT: call __fixunsdfdi@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_ui64: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rne @@ -845,6 +1327,17 @@ ; RV64IFD-NEXT: .LBB40_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_floor_double: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 
+; RV32IZFINXZDINX-NEXT: call floor@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_floor_double: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI40_0) @@ -881,6 +1374,17 @@ ; RV64IFD-NEXT: .LBB41_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_ceil_double: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call ceil@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_ceil_double: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI41_0) @@ -917,6 +1421,17 @@ ; RV64IFD-NEXT: .LBB42_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_trunc_double: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call trunc@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_trunc_double: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI42_0) @@ -953,6 +1468,17 @@ ; RV64IFD-NEXT: .LBB43_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_round_double: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call round@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_round_double: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI43_0) @@ -989,6 +1515,17 @@ ; RV64IFD-NEXT: .LBB44_2: ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: test_roundeven_double: +; RV32IZFINXZDINX: # %bb.0: +; RV32IZFINXZDINX-NEXT: addi sp, sp, -16 +; RV32IZFINXZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZFINXZDINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: .cfi_offset ra, -4 +; RV32IZFINXZDINX-NEXT: call roundeven@plt +; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 16 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: test_roundeven_double: ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI44_0) diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll --- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll @@ -3,6 +3,8 @@ ; RUN: -target-abi=ilp32d | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64d | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck --check-prefix=CHECKRV32ZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64 | FileCheck --check-prefix=CHECKRV64ZDINX %s @@ -12,6 +14,12 @@ ; CHECK-NEXT: fmv.d fa0, fa1 ; CHECK-NEXT: 
ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_false: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: mv a1, a3 +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_false: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: mv a0, a1 @@ -31,6 +39,29 @@ ; CHECK-NEXT: .LBB1_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_oeq: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: feq.d a4, a0, a2 +; CHECKRV32ZDINX-NEXT: bnez a4, .LBB1_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB1_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_oeq: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: feq.d a2, a0, a1 @@ -54,6 +85,29 @@ ; CHECK-NEXT: .LBB2_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_ogt: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: flt.d a4, a2, a0 +; CHECKRV32ZDINX-NEXT: bnez a4, .LBB2_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB2_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_ogt: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: flt.d a2, a1, a0 @@ -77,6 +131,29 @@ ; CHECK-NEXT: .LBB3_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_oge: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: fle.d a4, a2, a0 +; CHECKRV32ZDINX-NEXT: bnez a4, .LBB3_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB3_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_oge: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: fle.d a2, a1, a0 @@ -100,6 +177,29 @@ ; CHECK-NEXT: .LBB4_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_olt: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 
12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2 +; CHECKRV32ZDINX-NEXT: bnez a4, .LBB4_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB4_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_olt: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: flt.d a2, a0, a1 @@ -123,6 +223,29 @@ ; CHECK-NEXT: .LBB5_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_ole: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: fle.d a4, a0, a2 +; CHECKRV32ZDINX-NEXT: bnez a4, .LBB5_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB5_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_ole: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: fle.d a2, a0, a1 @@ -148,6 +271,31 @@ ; CHECK-NEXT: .LBB6_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_one: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2 +; CHECKRV32ZDINX-NEXT: flt.d a5, a2, a0 +; CHECKRV32ZDINX-NEXT: or a4, a5, a4 +; CHECKRV32ZDINX-NEXT: bnez a4, .LBB6_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB6_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_one: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: flt.d a2, a0, a1 @@ -175,6 +323,31 @@ ; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_ord: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: feq.d a4, a2, a2 +; CHECKRV32ZDINX-NEXT: feq.d a5, a0, a0 +; CHECKRV32ZDINX-NEXT: and a4, a5, a4 +; CHECKRV32ZDINX-NEXT: bnez a4, .LBB7_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB7_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: 
lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_ord: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: feq.d a2, a1, a1 @@ -202,6 +375,31 @@ ; CHECK-NEXT: .LBB8_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_ueq: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2 +; CHECKRV32ZDINX-NEXT: flt.d a5, a2, a0 +; CHECKRV32ZDINX-NEXT: or a4, a5, a4 +; CHECKRV32ZDINX-NEXT: beqz a4, .LBB8_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB8_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_ueq: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: flt.d a2, a0, a1 @@ -227,6 +425,29 @@ ; CHECK-NEXT: .LBB9_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_ugt: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: fle.d a4, a0, a2 +; CHECKRV32ZDINX-NEXT: beqz a4, .LBB9_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB9_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_ugt: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: fle.d a2, a0, a1 @@ -250,6 +471,29 @@ ; CHECK-NEXT: .LBB10_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_uge: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: flt.d a4, a0, a2 +; CHECKRV32ZDINX-NEXT: beqz a4, .LBB10_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB10_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_uge: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: flt.d a2, a0, a1 @@ -273,6 +517,29 @@ ; CHECK-NEXT: .LBB11_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_ult: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 
8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: fle.d a4, a2, a0 +; CHECKRV32ZDINX-NEXT: beqz a4, .LBB11_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB11_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_ult: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: fle.d a2, a1, a0 @@ -296,6 +563,29 @@ ; CHECK-NEXT: .LBB12_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_ule: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: flt.d a4, a2, a0 +; CHECKRV32ZDINX-NEXT: beqz a4, .LBB12_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB12_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_ule: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: flt.d a2, a1, a0 @@ -319,6 +609,29 @@ ; CHECK-NEXT: .LBB13_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_une: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: feq.d a4, a0, a2 +; CHECKRV32ZDINX-NEXT: beqz a4, .LBB13_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB13_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_une: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: feq.d a2, a0, a1 @@ -344,6 +657,31 @@ ; CHECK-NEXT: .LBB14_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_uno: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: feq.d a4, a2, a2 +; CHECKRV32ZDINX-NEXT: feq.d a5, a0, a0 +; CHECKRV32ZDINX-NEXT: and a4, a5, a4 +; CHECKRV32ZDINX-NEXT: beqz a4, .LBB14_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a2 +; CHECKRV32ZDINX-NEXT: .LBB14_2: +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; 
CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_uno: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: feq.d a2, a1, a1 @@ -364,6 +702,10 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_true: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_true: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: ret @@ -383,6 +725,26 @@ ; CHECK-NEXT: .LBB16_2: ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: i32_select_fcmp_oeq: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: feq.d a1, a0, a2 +; CHECKRV32ZDINX-NEXT: mv a0, a4 +; CHECKRV32ZDINX-NEXT: bnez a1, .LBB16_2 +; CHECKRV32ZDINX-NEXT: # %bb.1: +; CHECKRV32ZDINX-NEXT: mv a0, a5 +; CHECKRV32ZDINX-NEXT: .LBB16_2: +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: i32_select_fcmp_oeq: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: feq.d a1, a0, a1 @@ -405,6 +767,24 @@ ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_oeq_1_2: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: feq.d a0, a0, a2 +; CHECKRV32ZDINX-NEXT: li a1, 2 +; CHECKRV32ZDINX-NEXT: sub a0, a1, a0 +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_oeq_1_2: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: feq.d a0, a0, a1 @@ -423,6 +803,22 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_uge_negone_zero: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: fle.d a0, a0, a2 +; CHECKRV32ZDINX-NEXT: addi a0, a0, -1 +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_uge_negone_zero: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: fle.d a0, a0, a1 @@ -440,6 +836,22 @@ ; CHECK-NEXT: addi a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKRV32ZDINX-LABEL: select_fcmp_uge_1_2: +; CHECKRV32ZDINX: # %bb.0: +; CHECKRV32ZDINX-NEXT: addi sp, sp, -16 +; CHECKRV32ZDINX-NEXT: sw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a2, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a3, 12(sp) +; CHECKRV32ZDINX-NEXT: sw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: sw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: lw a0, 8(sp) +; CHECKRV32ZDINX-NEXT: lw a1, 12(sp) +; CHECKRV32ZDINX-NEXT: fle.d a0, a0, a2 +; CHECKRV32ZDINX-NEXT: addi a0, a0, 1 +; CHECKRV32ZDINX-NEXT: addi sp, sp, 16 +; 
CHECKRV32ZDINX-NEXT: ret +; ; CHECKRV64ZDINX-LABEL: select_fcmp_uge_1_2: ; CHECKRV64ZDINX: # %bb.0: ; CHECKRV64ZDINX-NEXT: fle.d a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/double-select-icmp.ll b/llvm/test/CodeGen/RISCV/double-select-icmp.ll --- a/llvm/test/CodeGen/RISCV/double-select-icmp.ll +++ b/llvm/test/CodeGen/RISCV/double-select-icmp.ll @@ -4,6 +4,8 @@ ; RUN: -target-abi=ilp32d | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64d | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=RV32ZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64 | FileCheck -check-prefixes=RV64ZDINX %s @@ -16,6 +18,28 @@ ; CHECK-NEXT: .LBB0_2: ; CHECK-NEXT: ret ; +; RV32ZDINX-LABEL: select_icmp_eq: +; RV32ZDINX: # %bb.0: +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: sw a2, 8(sp) +; RV32ZDINX-NEXT: sw a3, 12(sp) +; RV32ZDINX-NEXT: bne a0, a1, .LBB0_2 +; RV32ZDINX-NEXT: # %bb.1: +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: .LBB0_2: +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a0, 8(sp) +; RV32ZDINX-NEXT: lw a1, 12(sp) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; ; RV64ZDINX-LABEL: select_icmp_eq: ; RV64ZDINX: # %bb.0: ; RV64ZDINX-NEXT: beq a0, a1, .LBB0_2 @@ -38,6 +62,28 @@ ; CHECK-NEXT: .LBB1_2: ; CHECK-NEXT: ret ; +; RV32ZDINX-LABEL: select_icmp_ne: +; RV32ZDINX: # %bb.0: +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: sw a2, 8(sp) +; RV32ZDINX-NEXT: sw a3, 12(sp) +; RV32ZDINX-NEXT: beq a0, a1, .LBB1_2 +; RV32ZDINX-NEXT: # %bb.1: +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: .LBB1_2: +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a0, 8(sp) +; RV32ZDINX-NEXT: lw a1, 12(sp) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; ; RV64ZDINX-LABEL: select_icmp_ne: ; RV64ZDINX: # %bb.0: ; RV64ZDINX-NEXT: bne a0, a1, .LBB1_2 @@ -60,6 +106,28 @@ ; CHECK-NEXT: .LBB2_2: ; CHECK-NEXT: ret ; +; RV32ZDINX-LABEL: select_icmp_ugt: +; RV32ZDINX: # %bb.0: +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: sw a2, 8(sp) +; RV32ZDINX-NEXT: sw a3, 12(sp) +; RV32ZDINX-NEXT: bgeu a1, a0, .LBB2_2 +; RV32ZDINX-NEXT: # %bb.1: +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: .LBB2_2: +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a0, 8(sp) +; RV32ZDINX-NEXT: lw a1, 12(sp) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; ; RV64ZDINX-LABEL: select_icmp_ugt: ; RV64ZDINX: # %bb.0: ; RV64ZDINX-NEXT: bltu a1, a0, .LBB2_2 @@ -82,6 +150,28 @@ ; CHECK-NEXT: .LBB3_2: ; CHECK-NEXT: ret ; +; RV32ZDINX-LABEL: select_icmp_uge: +; RV32ZDINX: # %bb.0: +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a4, 8(sp) +; 
RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: sw a2, 8(sp) +; RV32ZDINX-NEXT: sw a3, 12(sp) +; RV32ZDINX-NEXT: bltu a0, a1, .LBB3_2 +; RV32ZDINX-NEXT: # %bb.1: +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: .LBB3_2: +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a0, 8(sp) +; RV32ZDINX-NEXT: lw a1, 12(sp) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; ; RV64ZDINX-LABEL: select_icmp_uge: ; RV64ZDINX: # %bb.0: ; RV64ZDINX-NEXT: bgeu a0, a1, .LBB3_2 @@ -104,6 +194,28 @@ ; CHECK-NEXT: .LBB4_2: ; CHECK-NEXT: ret ; +; RV32ZDINX-LABEL: select_icmp_ult: +; RV32ZDINX: # %bb.0: +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: sw a2, 8(sp) +; RV32ZDINX-NEXT: sw a3, 12(sp) +; RV32ZDINX-NEXT: bgeu a0, a1, .LBB4_2 +; RV32ZDINX-NEXT: # %bb.1: +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: .LBB4_2: +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a0, 8(sp) +; RV32ZDINX-NEXT: lw a1, 12(sp) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; ; RV64ZDINX-LABEL: select_icmp_ult: ; RV64ZDINX: # %bb.0: ; RV64ZDINX-NEXT: bltu a0, a1, .LBB4_2 @@ -126,6 +238,28 @@ ; CHECK-NEXT: .LBB5_2: ; CHECK-NEXT: ret ; +; RV32ZDINX-LABEL: select_icmp_ule: +; RV32ZDINX: # %bb.0: +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: sw a2, 8(sp) +; RV32ZDINX-NEXT: sw a3, 12(sp) +; RV32ZDINX-NEXT: bltu a1, a0, .LBB5_2 +; RV32ZDINX-NEXT: # %bb.1: +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: .LBB5_2: +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a0, 8(sp) +; RV32ZDINX-NEXT: lw a1, 12(sp) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; ; RV64ZDINX-LABEL: select_icmp_ule: ; RV64ZDINX: # %bb.0: ; RV64ZDINX-NEXT: bgeu a1, a0, .LBB5_2 @@ -148,6 +282,28 @@ ; CHECK-NEXT: .LBB6_2: ; CHECK-NEXT: ret ; +; RV32ZDINX-LABEL: select_icmp_sgt: +; RV32ZDINX: # %bb.0: +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: sw a2, 8(sp) +; RV32ZDINX-NEXT: sw a3, 12(sp) +; RV32ZDINX-NEXT: bge a1, a0, .LBB6_2 +; RV32ZDINX-NEXT: # %bb.1: +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: .LBB6_2: +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a0, 8(sp) +; RV32ZDINX-NEXT: lw a1, 12(sp) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; ; RV64ZDINX-LABEL: select_icmp_sgt: ; RV64ZDINX: # %bb.0: ; RV64ZDINX-NEXT: blt a1, a0, .LBB6_2 @@ -170,6 +326,28 @@ ; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: ret ; +; RV32ZDINX-LABEL: select_icmp_sge: +; RV32ZDINX: # %bb.0: +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: sw a2, 8(sp) +; RV32ZDINX-NEXT: sw a3, 12(sp) +; 
RV32ZDINX-NEXT: blt a0, a1, .LBB7_2 +; RV32ZDINX-NEXT: # %bb.1: +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: .LBB7_2: +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a0, 8(sp) +; RV32ZDINX-NEXT: lw a1, 12(sp) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; ; RV64ZDINX-LABEL: select_icmp_sge: ; RV64ZDINX: # %bb.0: ; RV64ZDINX-NEXT: bge a0, a1, .LBB7_2 @@ -192,6 +370,28 @@ ; CHECK-NEXT: .LBB8_2: ; CHECK-NEXT: ret ; +; RV32ZDINX-LABEL: select_icmp_slt: +; RV32ZDINX: # %bb.0: +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: sw a2, 8(sp) +; RV32ZDINX-NEXT: sw a3, 12(sp) +; RV32ZDINX-NEXT: bge a0, a1, .LBB8_2 +; RV32ZDINX-NEXT: # %bb.1: +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: .LBB8_2: +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a0, 8(sp) +; RV32ZDINX-NEXT: lw a1, 12(sp) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; ; RV64ZDINX-LABEL: select_icmp_slt: ; RV64ZDINX: # %bb.0: ; RV64ZDINX-NEXT: blt a0, a1, .LBB8_2 @@ -214,6 +414,28 @@ ; CHECK-NEXT: .LBB9_2: ; CHECK-NEXT: ret ; +; RV32ZDINX-LABEL: select_icmp_sle: +; RV32ZDINX: # %bb.0: +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: sw a2, 8(sp) +; RV32ZDINX-NEXT: sw a3, 12(sp) +; RV32ZDINX-NEXT: blt a1, a0, .LBB9_2 +; RV32ZDINX-NEXT: # %bb.1: +; RV32ZDINX-NEXT: lw a4, 8(sp) +; RV32ZDINX-NEXT: lw a5, 12(sp) +; RV32ZDINX-NEXT: .LBB9_2: +; RV32ZDINX-NEXT: sw a4, 8(sp) +; RV32ZDINX-NEXT: sw a5, 12(sp) +; RV32ZDINX-NEXT: lw a0, 8(sp) +; RV32ZDINX-NEXT: lw a1, 12(sp) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; ; RV64ZDINX-LABEL: select_icmp_sle: ; RV64ZDINX: # %bb.0: ; RV64ZDINX-NEXT: bge a1, a0, .LBB9_2 diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll --- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll +++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll @@ -3,6 +3,8 @@ ; RUN: | FileCheck -check-prefix=RV32IFD %s ; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi=lp64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64IFD %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -target-abi=ilp32 -verify-machineinstrs < %s \ +; RUN: | FileCheck -check-prefix=RV32IZFINXZDINX %s ; RUN: llc -mtriple=riscv64 -mattr=+zdinx -target-abi=lp64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64IZFINXZDINX %s @@ -58,6 +60,44 @@ ; RV64IFD-NEXT: fmv.x.d a0, fa5 ; RV64IFD-NEXT: ret ; +; RV32IZFINXZDINX-LABEL: func: +; RV32IZFINXZDINX: # %bb.0: # %entry +; RV32IZFINXZDINX-NEXT: addi sp, sp, -32 +; RV32IZFINXZDINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: beqz a2, .LBB0_2 +; RV32IZFINXZDINX-NEXT: # %bb.1: # %if.else +; RV32IZFINXZDINX-NEXT: addi a2, a2, -1 +; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) +; 
RV32IZFINXZDINX-NEXT: sw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: call func@plt +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: fadd.d a0, a0, s0 +; RV32IZFINXZDINX-NEXT: sw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: j .LBB0_3 +; RV32IZFINXZDINX-NEXT: .LBB0_2: # %return +; RV32IZFINXZDINX-NEXT: sw s0, 8(sp) +; RV32IZFINXZDINX-NEXT: sw s1, 12(sp) +; RV32IZFINXZDINX-NEXT: .LBB0_3: # %return +; RV32IZFINXZDINX-NEXT: lw a0, 8(sp) +; RV32IZFINXZDINX-NEXT: lw a1, 12(sp) +; RV32IZFINXZDINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZFINXZDINX-NEXT: addi sp, sp, 32 +; RV32IZFINXZDINX-NEXT: ret +; ; RV64IZFINXZDINX-LABEL: func: ; RV64IZFINXZDINX: # %bb.0: # %entry ; RV64IZFINXZDINX-NEXT: addi sp, sp, -16 diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll --- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll @@ -1746,9 +1746,11 @@ ; RV32IZDINXZHINX-LABEL: fcvt_h_d: ; RV32IZDINXZHINX: # %bb.0: ; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 -; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZDINXZHINX-NEXT: call __truncdfhf2@plt -; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: sw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: sw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: lw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: lw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: fcvt.h.d a0, a0 ; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 ; RV32IZDINXZHINX-NEXT: ret ; @@ -1806,9 +1808,11 @@ ; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_d: ; CHECK32-IZDINXZHINXMIN: # %bb.0: ; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 -; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; CHECK32-IZDINXZHINXMIN-NEXT: call __truncdfhf2@plt -; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0 ; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 ; CHECK32-IZDINXZHINXMIN-NEXT: ret ; @@ -1875,10 +1879,11 @@ ; RV32IZDINXZHINX-LABEL: fcvt_d_h: ; RV32IZDINXZHINX: # %bb.0: ; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 -; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 -; RV32IZDINXZHINX-NEXT: call __extendsfdf2@plt -; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: fcvt.d.h a0, a0 +; RV32IZDINXZHINX-NEXT: sw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: sw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: lw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: lw a1, 12(sp) ; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 ; RV32IZDINXZHINX-NEXT: ret ; @@ -1940,10 +1945,11 @@ ; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_d_h: ; CHECK32-IZDINXZHINXMIN: # %bb.0: ; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 -; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: call __extendsfdf2@plt -; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0 +; 
CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp) ; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 ; CHECK32-IZDINXZHINXMIN-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -3849,9 +3849,11 @@ ; RV32IZDINXZHINX-LABEL: fcvt_h_d: ; RV32IZDINXZHINX: # %bb.0: ; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 -; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZDINXZHINX-NEXT: call __truncdfhf2@plt -; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: sw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: sw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: lw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: lw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: fcvt.h.d a0, a0 ; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 ; RV32IZDINXZHINX-NEXT: ret ; @@ -3927,9 +3929,11 @@ ; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_d: ; CHECK32-IZDINXZHINXMIN: # %bb.0: ; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 -; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; CHECK32-IZDINXZHINXMIN-NEXT: call __truncdfhf2@plt -; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0 ; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 ; CHECK32-IZDINXZHINXMIN-NEXT: ret ; @@ -3995,10 +3999,11 @@ ; RV32IZDINXZHINX-LABEL: fcvt_d_h: ; RV32IZDINXZHINX: # %bb.0: ; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 -; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 -; RV32IZDINXZHINX-NEXT: call __extendsfdf2@plt -; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: fcvt.d.h a0, a0 +; RV32IZDINXZHINX-NEXT: sw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: sw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: lw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: lw a1, 12(sp) ; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 ; RV32IZDINXZHINX-NEXT: ret ; @@ -4086,10 +4091,11 @@ ; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_d_h: ; CHECK32-IZDINXZHINXMIN: # %bb.0: ; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 -; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill -; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 -; CHECK32-IZDINXZHINXMIN-NEXT: call __extendsfdf2@plt -; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp) ; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 ; CHECK32-IZDINXZHINXMIN-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll b/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/zdinx-boundary-check.ll @@ -0,0 +1,145 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefix=RV32ZDINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \ +; RUN: -target-abi=lp64 | FileCheck -check-prefix=RV64ZDINX %s + +define 
void @foo(ptr nocapture %p, double %d) { +; RV32ZDINX-LABEL: foo: +; RV32ZDINX: # %bb.0: # %entry +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a1, 8(sp) +; RV32ZDINX-NEXT: sw a2, 12(sp) +; RV32ZDINX-NEXT: lw a2, 8(sp) +; RV32ZDINX-NEXT: lw a3, 12(sp) +; RV32ZDINX-NEXT: addi a0, a0, 2047 +; RV32ZDINX-NEXT: sw a2, -3(a0) +; RV32ZDINX-NEXT: sw a3, 1(a0) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; +; RV64ZDINX-LABEL: foo: +; RV64ZDINX: # %bb.0: # %entry +; RV64ZDINX-NEXT: sd a1, 2044(a0) +; RV64ZDINX-NEXT: ret +entry: + %add.ptr = getelementptr inbounds i8, ptr %p, i64 2044 + store double %d, ptr %add.ptr, align 8 + ret void +} + +define void @foo2(ptr nocapture %p, double %d) { +; RV32ZDINX-LABEL: foo2: +; RV32ZDINX: # %bb.0: # %entry +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a1, 8(sp) +; RV32ZDINX-NEXT: sw a2, 12(sp) +; RV32ZDINX-NEXT: lw a2, 8(sp) +; RV32ZDINX-NEXT: lw a3, 12(sp) +; RV32ZDINX-NEXT: fadd.d a2, a2, a2 +; RV32ZDINX-NEXT: addi a0, a0, 2047 +; RV32ZDINX-NEXT: sw a2, -3(a0) +; RV32ZDINX-NEXT: sw a3, 1(a0) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; +; RV64ZDINX-LABEL: foo2: +; RV64ZDINX: # %bb.0: # %entry +; RV64ZDINX-NEXT: fadd.d a1, a1, a1 +; RV64ZDINX-NEXT: sd a1, 2044(a0) +; RV64ZDINX-NEXT: ret +entry: + %a = fadd double %d, %d + %add.ptr = getelementptr inbounds i8, ptr %p, i64 2044 + store double %a, ptr %add.ptr, align 8 + ret void +} + +@d = global double 4.2, align 8 + +define void @foo3(ptr nocapture %p) { +; RV32ZDINX-LABEL: foo3: +; RV32ZDINX: # %bb.0: # %entry +; RV32ZDINX-NEXT: lui a1, %hi(d) +; RV32ZDINX-NEXT: lw a2, %lo(d)(a1) +; RV32ZDINX-NEXT: lw a3, %lo(d+4)(a1) +; RV32ZDINX-NEXT: addi a0, a0, 2047 +; RV32ZDINX-NEXT: sw a2, -3(a0) +; RV32ZDINX-NEXT: sw a3, 1(a0) +; RV32ZDINX-NEXT: ret +; +; RV64ZDINX-LABEL: foo3: +; RV64ZDINX: # %bb.0: # %entry +; RV64ZDINX-NEXT: lui a1, %hi(d) +; RV64ZDINX-NEXT: ld a1, %lo(d)(a1) +; RV64ZDINX-NEXT: sd a1, 2044(a0) +; RV64ZDINX-NEXT: ret +entry: + %0 = load double, ptr @d, align 8 + %add.ptr = getelementptr inbounds i8, ptr %p, i64 2044 + store double %0, ptr %add.ptr, align 8 + ret void +} + +define void @foo4(ptr %p) { +; RV32ZDINX-LABEL: foo4: +; RV32ZDINX: # %bb.0: # %entry +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a0, 8(sp) +; RV32ZDINX-NEXT: addi a0, a0, 2047 +; RV32ZDINX-NEXT: lw a1, 1(a0) +; RV32ZDINX-NEXT: lw a0, -3(a0) +; RV32ZDINX-NEXT: lui a2, %hi(d) +; RV32ZDINX-NEXT: sw a0, %lo(d)(a2) +; RV32ZDINX-NEXT: sw a1, %lo(d+4)(a2) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; +; RV64ZDINX-LABEL: foo4: +; RV64ZDINX: # %bb.0: # %entry +; RV64ZDINX-NEXT: addi sp, sp, -16 +; RV64ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV64ZDINX-NEXT: sd a0, 8(sp) +; RV64ZDINX-NEXT: ld a0, 2044(a0) +; RV64ZDINX-NEXT: lui a1, %hi(d) +; RV64ZDINX-NEXT: sd a0, %lo(d)(a1) +; RV64ZDINX-NEXT: addi sp, sp, 16 +; RV64ZDINX-NEXT: ret +entry: + %p.addr = alloca ptr, align 8 + store ptr %p, ptr %p.addr, align 8 + %0 = load ptr, ptr %p.addr, align 8 + %add.ptr = getelementptr inbounds i8, ptr %0, i64 2044 + %1 = load double, ptr %add.ptr, align 8 + store double %1, ptr @d, align 8 + ret void +} + +define void @foo5(ptr nocapture %p, double %d) { +; RV32ZDINX-LABEL: foo5: +; RV32ZDINX: # %bb.0: # %entry +; RV32ZDINX-NEXT: addi sp, sp, -16 +; RV32ZDINX-NEXT: .cfi_def_cfa_offset 16 +; RV32ZDINX-NEXT: sw a1, 8(sp) +; RV32ZDINX-NEXT: sw a2, 
12(sp) +; RV32ZDINX-NEXT: lw a2, 8(sp) +; RV32ZDINX-NEXT: lw a3, 12(sp) +; RV32ZDINX-NEXT: addi a0, a0, -2048 +; RV32ZDINX-NEXT: sw a2, -1(a0) +; RV32ZDINX-NEXT: sw a3, 3(a0) +; RV32ZDINX-NEXT: addi sp, sp, 16 +; RV32ZDINX-NEXT: ret +; +; RV64ZDINX-LABEL: foo5: +; RV64ZDINX: # %bb.0: # %entry +; RV64ZDINX-NEXT: addi a0, a0, -2048 +; RV64ZDINX-NEXT: sd a1, -1(a0) +; RV64ZDINX-NEXT: ret +entry: + %add.ptr = getelementptr inbounds i8, ptr %p, i64 -2049 + store double %d, ptr %add.ptr, align 8 + ret void +}