diff --git a/llvm/lib/Target/LoongArch/LoongArch.h b/llvm/lib/Target/LoongArch/LoongArch.h
--- a/llvm/lib/Target/LoongArch/LoongArch.h
+++ b/llvm/lib/Target/LoongArch/LoongArch.h
@@ -14,6 +14,7 @@
 #ifndef LLVM_LIB_TARGET_LOONGARCH_LOONGARCH_H
 #define LLVM_LIB_TARGET_LOONGARCH_LOONGARCH_H
 
+#include "MCTargetDesc/LoongArchBaseInfo.h"
 #include "llvm/Target/TargetMachine.h"
 
 namespace llvm {
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -27,6 +27,7 @@
   FIRST_NUMBER = ISD::BUILTIN_OP_END,
 
   // TODO: add more LoongArchISDs
+  RET,
 };
 } // namespace LoongArchISD
@@ -39,6 +40,37 @@
                                    const LoongArchSubtarget &STI);
 
   const LoongArchSubtarget &getSubtarget() const { return Subtarget; }
+
+  // This method returns the name of a target specific DAG node.
+  const char *getTargetNodeName(unsigned Opcode) const override;
+
+  // Lower incoming arguments, copy physregs into vregs.
+  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+                               bool IsVarArg,
+                               const SmallVectorImpl<ISD::InputArg> &Ins,
+                               const SDLoc &DL, SelectionDAG &DAG,
+                               SmallVectorImpl<SDValue> &InVals) const override;
+  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+                      bool IsVarArg,
+                      const SmallVectorImpl<ISD::OutputArg> &Outs,
+                      LLVMContext &Context) const override;
+  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+                      const SmallVectorImpl<ISD::OutputArg> &Outs,
+                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+                      SelectionDAG &DAG) const override;
+
+private:
+  /// Target-specific function used to lower LoongArch calling conventions.
+  typedef bool LoongArchCCAssignFn(unsigned ValNo, MVT ValVT,
+                                   CCValAssign::LocInfo LocInfo,
+                                   CCState &State);
+
+  void analyzeInputArgs(CCState &CCInfo,
+                        const SmallVectorImpl<ISD::InputArg> &Ins,
+                        LoongArchCCAssignFn Fn) const;
+  void analyzeOutputArgs(CCState &CCInfo,
+                         const SmallVectorImpl<ISD::OutputArg> &Outs,
+                         LoongArchCCAssignFn Fn) const;
 };
 
 } // end namespace llvm
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -43,3 +43,157 @@
   const Align FunctionAlignment(4);
   setMinFunctionAlignment(FunctionAlignment);
 }
+
+const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {
+  switch ((LoongArchISD::NodeType)Opcode) {
+  case LoongArchISD::FIRST_NUMBER:
+    break;
+
+#define NODE_NAME_CASE(node)                                                   \
+  case LoongArchISD::node:                                                     \
+    return "LoongArchISD::" #node;
+
+    // TODO: Add more target-dependent nodes later.
+    NODE_NAME_CASE(RET)
+  }
+#undef NODE_NAME_CASE
+  return nullptr;
+}
+
+//===----------------------------------------------------------------------===//
+// Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+// FIXME: Now, we only support CallingConv::C with fixed arguments which are
+// passed with integer registers.
+static const MCPhysReg ArgGPRs[] = {
+    LoongArch::R4, LoongArch::R5, LoongArch::R6,  LoongArch::R7,
+    LoongArch::R8, LoongArch::R9, LoongArch::R10, LoongArch::R11};
+
+// Implements the LoongArch calling convention. Returns true upon failure.
+static bool CC_LoongArch(unsigned ValNo, MVT ValVT,
+                         CCValAssign::LocInfo LocInfo, CCState &State) {
+  // Allocate to a register if possible.
+  Register Reg = State.AllocateReg(ArgGPRs);
+  if (Reg) {
+    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, ValVT, LocInfo));
+    return false;
+  }
+
+  // TODO: Handle arguments passed without register.
+  return true;
+}
+
+void LoongArchTargetLowering::analyzeInputArgs(
+    CCState &CCInfo, const SmallVectorImpl<ISD::InputArg> &Ins,
+    LoongArchCCAssignFn Fn) const {
+  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
+    MVT ArgVT = Ins[i].VT;
+
+    if (Fn(i, ArgVT, CCValAssign::Full, CCInfo)) {
+      LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
+                        << EVT(ArgVT).getEVTString() << '\n');
+      llvm_unreachable("");
+    }
+  }
+}
+
+void LoongArchTargetLowering::analyzeOutputArgs(
+    CCState &CCInfo, const SmallVectorImpl<ISD::OutputArg> &Outs,
+    LoongArchCCAssignFn Fn) const {
+  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+    MVT ArgVT = Outs[i].VT;
+
+    if (Fn(i, ArgVT, CCValAssign::Full, CCInfo)) {
+      LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
+                        << EVT(ArgVT).getEVTString() << "\n");
+      llvm_unreachable("");
+    }
+  }
+}
+
+static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
+                                const CCValAssign &VA, const SDLoc &DL,
+                                const LoongArchTargetLowering &TLI) {
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineRegisterInfo &RegInfo = MF.getRegInfo();
+  EVT LocVT = VA.getLocVT();
+  const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
+  Register VReg = RegInfo.createVirtualRegister(RC);
+  RegInfo.addLiveIn(VA.getLocReg(), VReg);
+
+  return DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
+}
+
+// Transform physical registers into virtual registers.
+SDValue LoongArchTargetLowering::LowerFormalArguments(
+    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+
+  MachineFunction &MF = DAG.getMachineFunction();
+
+  switch (CallConv) {
+  default:
+    llvm_unreachable("Unsupported calling convention");
+  case CallingConv::C:
+    break;
+  }
+
+  // Assign locations to all of the incoming arguments.
+  SmallVector<CCValAssign> ArgLocs;
+  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+
+  analyzeInputArgs(CCInfo, Ins, CC_LoongArch);
+
+  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
+    InVals.push_back(unpackFromRegLoc(DAG, Chain, ArgLocs[i], DL, *this));
+
+  return Chain;
+}
+
+bool LoongArchTargetLowering::CanLowerReturn(
+    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
+    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
+  // Any return value split into more than two values can't be returned
+  // directly.
+  return Outs.size() <= 2;
+}
+
+SDValue LoongArchTargetLowering::LowerReturn(
+    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+    const SmallVectorImpl<ISD::OutputArg> &Outs,
+    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+    SelectionDAG &DAG) const {
+  // Stores the assignment of the return value to a location.
+  SmallVector<CCValAssign> RVLocs;
+
+  // Info about the registers and stack slot.
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
+
+  analyzeOutputArgs(CCInfo, Outs, CC_LoongArch);
+
+  SDValue Glue;
+  SmallVector<SDValue, 4> RetOps(1, Chain);
+
+  // Copy the result values into the output registers.
+  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
+    CCValAssign &VA = RVLocs[i];
+    assert(VA.isRegLoc() && "Can only return in registers!");
+
+    // Handle a 'normal' return.
+    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Glue);
+
+    // Guarantee that all emitted copies are stuck together.
+    Glue = Chain.getValue(1);
+    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+  }
+
+  RetOps[0] = Chain; // Update chain.
+
+  // Add the glue node if we have it.
+  if (Glue.getNode())
+    RetOps.push_back(Glue);
+
+  return DAG.getNode(LoongArchISD::RET, DL, MVT::Other, RetOps);
+}
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrFormats.td b/llvm/lib/Target/LoongArch/LoongArchInstrFormats.td
--- a/llvm/lib/Target/LoongArch/LoongArchInstrFormats.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrFormats.td
@@ -34,8 +34,10 @@
 }
 
 // Pseudo instructions
-class Pseudo<dag outs, dag ins, list<dag> pattern> : LAInst<outs, ins, "", pattern> {
+class Pseudo<dag outs, dag ins, list<dag> pattern = [], string asmstr = "">
+    : LAInst<outs, ins, asmstr, pattern> {
   let isPseudo = 1;
+  let isCodeGenOnly = 1;
 }
 
 // 2R-type
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
@@ -13,6 +13,7 @@
 #ifndef LLVM_LIB_TARGET_LOONGARCH_LOONGARCHINSTRINFO_H
 #define LLVM_LIB_TARGET_LOONGARCH_LOONGARCHINSTRINFO_H
 
+#include "LoongArchRegisterInfo.h"
 #include "llvm/CodeGen/TargetInstrInfo.h"
 
 #define GET_INSTRINFO_HEADER
@@ -25,6 +26,10 @@
 class LoongArchInstrInfo : public LoongArchGenInstrInfo {
 public:
   explicit LoongArchInstrInfo(LoongArchSubtarget &STI);
+
+  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+                   const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg,
+                   bool KillSrc) const override;
 };
 
 } // end namespace llvm
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "LoongArchInstrInfo.h"
+#include "LoongArch.h"
 
 using namespace llvm;
 
@@ -20,3 +21,18 @@
 LoongArchInstrInfo::LoongArchInstrInfo(LoongArchSubtarget &STI)
     // FIXME: add CFSetup and CFDestroy Inst when we implement function call.
     : LoongArchGenInstrInfo() {}
+
+void LoongArchInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator MBBI,
+                                     const DebugLoc &DL, MCRegister DstReg,
+                                     MCRegister SrcReg, bool KillSrc) const {
+  if (LoongArch::GPRRegClass.contains(DstReg, SrcReg)) {
+    BuildMI(MBB, MBBI, DL, get(LoongArch::OR), DstReg)
+        .addReg(SrcReg, getKillRegState(KillSrc))
+        .addReg(LoongArch::R0);
+    return;
+  }
+
+  // TODO: Now, we only support GPR->GPR copies.
+  llvm_unreachable("LoongArch didn't implement copyPhysReg");
+}
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -15,6 +15,9 @@
 //===----------------------------------------------------------------------===//
 
 // TODO: Add LoongArch specific DAG Nodes
+// Target-dependent nodes.
+def loongarch_ret : SDNode<"LoongArchISD::RET", SDTNone,
+                           [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
 
 //===----------------------------------------------------------------------===//
 // Operand and SDNode transformation definitions.
@@ -494,6 +497,12 @@
 def : PatGprGpr;
 def : PatGprGpr;
 
+/// Branches and jumps
+
+let isBarrier = 1, isReturn = 1, isTerminator = 1 in
+def PseudoRET : Pseudo<(outs), (ins), [(loongarch_ret)]>,
+                PseudoInstExpansion<(JIRL R0, R1, 0)>;
+
 /// LA32 patterns
 let Predicates = [IsLA32] in {
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
@@ -29,12 +29,11 @@
   RegisterTargetMachine<LoongArchTargetMachine> Y(getTheLoongArch64Target());
 }
 
-// FIXME: This is just a placeholder to make current commit buildable. Body of
-// this function will be filled in later commits.
 static std::string computeDataLayout(const Triple &TT) {
-  std::string Ret;
-  Ret += "e";
-  return Ret;
+  if (TT.isArch64Bit())
+    return "e-m:e-p:64:64-i64:64-i128:128-n64-S128";
+  assert(TT.isArch32Bit() && "only LA32 and LA64 are currently supported");
+  return "e-m:e-p:32:32-i64:64-n32-S128";
 }
 
 static Reloc::Model getEffectiveRelocModel(const Triple &TT,
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/add.ll
@@ -0,0 +1,17 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=CHECK32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=CHECK64
+
+define i32 @addRR(i32 %x, i32 %y) {
+; CHECK32-LABEL: addRR:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    add.w $a0, $a1, $a0
+; CHECK32-NEXT:    jirl $zero, $ra, 0
+;
+; CHECK64-LABEL: addRR:
+; CHECK64:       # %bb.0: # %entry
+; CHECK64-NEXT:    add.d $a0, $a1, $a0
+; CHECK64-NEXT:    jirl $zero, $ra, 0
+entry:
+  %add = add nsw i32 %y, %x
+  ret i32 %add
+}
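A possible additional regression test, not part of this patch: the diff only covers the add.ll case above, so a minimal sketch exercising the new return lowering in isolation could look like the test below. The file name (ir-instruction/ret.ll), the function name, and the CHECK lines are assumptions based on the lowering added here (LoongArchISD::RET matched by PseudoRET and expanded to JIRL $zero, $ra, 0), not verified llc output.

; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s
; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s

; A bare "ret void" should lower to LoongArchISD::RET and emit only the
; return instruction, with no argument or return-value copies.
define void @ret_void() {
; CHECK-LABEL: ret_void:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    jirl $zero, $ra, 0
entry:
  ret void
}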