Index: lib/Target/NDS32/CMakeLists.txt
===================================================================
--- lib/Target/NDS32/CMakeLists.txt
+++ lib/Target/NDS32/CMakeLists.txt
@@ -11,6 +11,7 @@
 add_llvm_target(NDS32CodeGen
   NDS32ISelDAGToDAG.cpp
+  NDS32ISelLowering.cpp
   NDS32TargetMachine.cpp
   )
Index: lib/Target/NDS32/MCTargetDesc/NDS32BaseInfo.h
===================================================================
--- /dev/null
+++ lib/Target/NDS32/MCTargetDesc/NDS32BaseInfo.h
@@ -0,0 +1,57 @@
+//===-- NDS32BaseInfo.h - Top level definitions for NDS32 MC ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains small standalone helper functions and enum definitions
+// for the NDS32 target useful for the compiler back-end and the MC libraries.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIB_TARGET_NDS32_MCTARGETDESC_NDS32BASEINFO_H
+#define LLVM_LIB_TARGET_NDS32_MCTARGETDESC_NDS32BASEINFO_H
+
+#include "NDS32MCTargetDesc.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+/// NDS32II - This namespace holds all of the target specific flags that
+/// instruction info tracks.
+///
+namespace NDS32II {
+  /// Target Operand Flag enum.
+  enum TOF {
+    //===------------------------------------------------------------------===//
+    // NDS32 Specific MachineOperand flags.
+    MO_NO_FLAG,
+
+    /// MO_HI20/MO_LO12 - Represents the high 20-bit or low 12-bit part of
+    /// an absolute symbol address.
+    MO_HI20,
+    MO_LO12,
+  };
+}
+
+/// isNDS32LowRegister - Returns true if the register is a low register
+/// (r0-r7).
+static inline bool isNDS32LowRegister(unsigned Reg) {
+  using namespace NDS32;
+  switch (Reg) {
+  case R0: case R1: case R2: case R3:
+  case R4: case R5: case R6: case R7:
+    return true;
+  default:
+    return false;
+  }
+}
+
+} // end namespace llvm
+
+#endif
Index: lib/Target/NDS32/NDS32ISelLowering.h
===================================================================
--- /dev/null
+++ lib/Target/NDS32/NDS32ISelLowering.h
@@ -0,0 +1,235 @@
+//===-- NDS32ISelLowering.h - NDS32 DAG Lowering Interface ----*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that NDS32 uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_NDS32_NDS32ISELLOWERING_H
+#define LLVM_LIB_TARGET_NDS32_NDS32ISELLOWERING_H
+
+#include "NDS32.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/Target/TargetLowering.h"
+#include "MCTargetDesc/NDS32BaseInfo.h"
+
+namespace llvm {
+  namespace NDS32ISD {
+    enum NodeType : unsigned {
+      FIRST_NUMBER = ISD::BUILTIN_OP_END,
+
+      // Get the higher 20 bits from a 32-bit immediate/symbol.
+      Hi20,
+
+      // Get the lower 12 bits from a 32-bit immediate/symbol.
+      Lo12,
+
+      /// Function return. Operand 0 is the chain operand.
+      RET,
+
+      /// Same as RET, but used for returning from ISRs.
+      IRET,
+
+      // Pseudo-instruction representing a memory copy using lmw/smw
+      // instructions.
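+      // (Illustrative note, not part of the original patch: lmw/smw are the
+      // NDS32 load-multiple-word/store-multiple-word instructions, so one
+      // lmw/smw pair can move several words per iteration; the scratch
+      // registers this pseudo needs are attached after instruction selection
+      // by attachMEMCPYScratchRegs in NDS32ISelLowering.cpp.)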
+      MEMCPY,
+
+      /// CALL - These operations represent an abstract call instruction,
+      /// which includes a bunch of information.
+      CALL,
+
+      /// Wrapper - A wrapper node for TargetConstantPool,
+      /// TargetExternalSymbol, and TargetGlobalAddress.
+      Wrapper,
+
+      /// SDIVREM, UDIVREM - Multiple output division instructions.
+      SDIVREM, UDIVREM
+    };
+  }
+
+  class NDS32Subtarget;
+  class NDS32TargetLowering : public TargetLowering {
+  public:
+    explicit NDS32TargetLowering(const TargetMachine &TM,
+                                 const NDS32Subtarget &STI);
+
+    MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
+      return MVT::i32;
+    }
+
+    /// LowerOperation - Provide custom lowering hooks for some operations.
+    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+
+    /// getTargetNodeName - This method returns the name of a target specific
+    /// DAG node.
+    const char *getTargetNodeName(unsigned Opcode) const override;
+
+    void HandleByVal(CCState *, unsigned &, unsigned) const override;
+
+    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerDIVREM(SDValue Op, SelectionDAG &DAG) const;
+    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
+
+    TargetLowering::ConstraintType
+    getConstraintType(StringRef Constraint) const override;
+    std::pair<unsigned, const TargetRegisterClass *>
+    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+                                 StringRef Constraint, MVT VT) const override;
+
+    /// isLegalAddressingMode - Return true if the addressing mode represented
+    /// by AM is legal for this target, for a load/store of the specified type.
+    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
+                               Type *Ty, unsigned AS) const override;
+
+    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
+
+    void AdjustInstrPostInstrSelection(MachineInstr &MI,
+                                       SDNode *Node) const override;
+
+    /// If a physical register, this returns the register that receives the
+    /// exception address on entry to an EH pad.
+    unsigned
+    getExceptionPointerRegister(const Constant *PersonalityFn) const override {
+      return NDS32::R0;
+    }
+
+    /// If a physical register, this returns the register that receives the
+    /// exception typeid on entry to a landing pad.
+    unsigned
+    getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
+      return NDS32::R1;
+    }
+
+  protected:
+    // This method creates the following nodes, which are necessary for
+    // computing a symbol's address in non-PIC mode:
+    //
+    // (or %hi20(sym), %lo12(sym))
+    template <class NodeTy>
+    SDValue getAddrNonPIC(NodeTy *N, const SDLoc &DL, EVT Ty,
+                          SelectionDAG &DAG) const {
+      SDValue Hi = getTargetNode(N, Ty, DAG, NDS32II::MO_HI20);
+      SDValue Lo = getTargetNode(N, Ty, DAG, NDS32II::MO_LO12);
+      return DAG.getNode(ISD::OR, DL, Ty,
+                         DAG.getNode(NDS32ISD::Hi20, DL, Ty, Hi),
+                         DAG.getNode(NDS32ISD::Lo12, DL, Ty, Lo));
+    }
+
+  private:
+    /// Subtarget - Keep a pointer to the NDS32Subtarget around so that we can
+    /// make the right decision when generating code for different targets.
+    const NDS32Subtarget *Subtarget;
+
+    // Create a TargetGlobalAddress node.
+    SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
+                          unsigned Flag) const;
+
+    // Create a TargetExternalSymbol node.
+    SDValue getTargetNode(ExternalSymbolSDNode *N, EVT Ty, SelectionDAG &DAG,
+                          unsigned Flag) const;
+
+    // Create a TargetBlockAddress node.
+    SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
+                          unsigned Flag) const;
+
+    // Create a TargetJumpTable node.
+    SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
+                          unsigned Flag) const;
+
+    // Create a TargetConstantPool node.
+    SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
+                          unsigned Flag) const;
+
+    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
+
+    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
+                             const SDLoc &dl, SelectionDAG &DAG,
+                             const CCValAssign &VA,
+                             ISD::ArgFlagsTy Flags) const;
+
+    SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee,
+                           CallingConv::ID CallConv, bool isVarArg,
+                           bool isTailCall,
+                           const SmallVectorImpl<ISD::OutputArg> &Outs,
+                           const SmallVectorImpl<SDValue> &OutVals,
+                           const SmallVectorImpl<ISD::InputArg> &Ins,
+                           const SDLoc &dl, SelectionDAG &DAG,
+                           SmallVectorImpl<SDValue> &InVals) const;
+
+    SDValue LowerCCCArguments(SDValue Chain, CallingConv::ID CallConv,
+                              bool isVarArg,
+                              const SmallVectorImpl<ISD::InputArg> &Ins,
+                              const SDLoc &dl, SelectionDAG &DAG,
+                              SmallVectorImpl<SDValue> &InVals) const;
+
+    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
+                            CallingConv::ID CallConv, bool isVarArg,
+                            const SmallVectorImpl<ISD::InputArg> &Ins,
+                            const SDLoc &dl, SelectionDAG &DAG,
+                            SmallVectorImpl<SDValue> &InVals) const;
+
+    /// copyByValRegs - Copy argument registers which were used to pass a
+    /// byval argument to the stack. Create a stack frame object for the
+    /// byval argument.
+    void copyByValRegs(SDValue Chain, const SDLoc &DL,
+                       std::vector<SDValue> &OutChains, SelectionDAG &DAG,
+                       const ISD::ArgFlagsTy &Flags,
+                       SmallVectorImpl<SDValue> &InVals,
+                       const Argument *FuncArg, unsigned FirstReg,
+                       unsigned LastReg, const CCValAssign &VA,
+                       CCState &State) const;
+
+    /// passByValArg - Pass a byval argument in registers or on the stack.
+    void passByValArg(SDValue Chain, const SDLoc &DL,
+                      RegsToPassVector &RegsToPass,
+                      SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
+                      MachineFrameInfo &MFI, SelectionDAG &DAG, SDValue Arg,
+                      unsigned FirstReg, unsigned LastReg,
+                      const ISD::ArgFlagsTy &Flags, bool isLittle,
+                      const CCValAssign &VA) const;
+
+    /// RestoreVarArgRegs - Restore variable function arguments passed in
+    /// registers to the stack. Also create a stack frame object for the
+    /// first variable argument.
+    void RestoreVarArgRegs(std::vector<SDValue> &OutChains, SDValue Chain,
+                           const SDLoc &DL, SelectionDAG &DAG,
+                           CCState &State) const;
+
+    SDValue
+    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
+                         bool isVarArg,
+                         const SmallVectorImpl<ISD::InputArg> &Ins,
+                         const SDLoc &dl, SelectionDAG &DAG,
+                         SmallVectorImpl<SDValue> &InVals) const override;
+    SDValue
+    LowerCall(TargetLowering::CallLoweringInfo &CLI,
+              SmallVectorImpl<SDValue> &InVals) const override;
+
+    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+                        bool isVarArg,
+                        const SmallVectorImpl<ISD::OutputArg> &Outs,
+                        const SmallVectorImpl<SDValue> &OutVals,
+                        const SDLoc &dl, SelectionDAG &DAG) const override;
+
+    bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
+                                    SDValue &Base,
+                                    SDValue &Offset,
+                                    ISD::MemIndexedMode &AM,
+                                    SelectionDAG &DAG) const override;
+  };
+} // namespace llvm
+
+#endif
Index: lib/Target/NDS32/NDS32ISelLowering.cpp
===================================================================
--- /dev/null
+++ lib/Target/NDS32/NDS32ISelLowering.cpp
@@ -0,0 +1,1121 @@
+//===-- NDS32ISelLowering.cpp - NDS32 DAG Lowering Implementation --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the NDS32TargetLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NDS32ISelLowering.h"
+#include "NDS32.h"
+#include "NDS32MachineFunctionInfo.h"
+#include "NDS32Subtarget.h"
+#include "NDS32TargetMachine.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "NDS32-lower"
+
+NDS32TargetLowering::NDS32TargetLowering(const TargetMachine &TM,
+                                         const NDS32Subtarget &STI)
+    : TargetLowering(TM), Subtarget(&STI) {
+
+  // Set up the register classes.
+  addRegisterClass(MVT::i32, &NDS32::GPRRegClass);
+
+  // Compute derived properties from the register classes.
+  computeRegisterProperties(STI.getRegisterInfo());
+
+  // Extended load operations for i1 types must be promoted.
+  for (MVT VT : MVT::integer_valuetypes()) {
+    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+  }
+
+  // Post-increment load/store is legal in the NDS32 ISA.
+  setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
+  setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
+  setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
+  setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
+  setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
+  setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
+
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+
+  setOperationAction(ISD::ADDE, MVT::i32, Expand);
+  setOperationAction(ISD::ADDC, MVT::i32, Expand);
+  setOperationAction(ISD::SUBE, MVT::i32, Expand);
+  setOperationAction(ISD::SUBC, MVT::i32, Expand);
+
+  // Expand multiplication operations.
+  setOperationAction(ISD::MULHU, MVT::i32, Expand);
+  setOperationAction(ISD::MULHS, MVT::i32, Expand);
+  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
+  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+
+  // Expand division operations (DIVREM is custom-lowered below).
+  setOperationAction(ISD::UREM, MVT::i32, Expand);
+  setOperationAction(ISD::SREM, MVT::i32, Expand);
+  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
+  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
+  setOperationAction(ISD::UDIV, MVT::i32, Expand);
+  setOperationAction(ISD::SDIV, MVT::i32, Expand);
+
+  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
+  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
+
+  setOperationAction(ISD::VASTART, MVT::Other, Custom);
+  setOperationAction(ISD::VAARG, MVT::Other, Expand);
+  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+  setOperationAction(ISD::VAEND, MVT::Other, Expand);
+
+  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
+  setOperationAction(ISD::ROTL, MVT::i32, Expand);
+  setOperationAction(ISD::ROTL, MVT::i64, Expand);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
+
+  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+
+  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
+  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
+  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+
+  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+
+  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
+
+  // Atomic operations: set them all for expansion, which will force
+  // libcalls.
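+  // (Illustrative note, not from the original patch: with Expand,
+  // legalization lowers the i32 atomics below to __sync_* runtime calls,
+  // e.g. an `atomicrmw add i32` becomes a call to __sync_fetch_and_add_4,
+  // so no load-locked/store-conditional sequences are emitted here.)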
+  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
+  setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
+
+  // Miscellaneous target configuration.
+  setStackPointerRegisterToSaveRestore(NDS32::SP);
+  setBooleanContents(ZeroOrOneBooleanContent);
+  setBooleanVectorContents(ZeroOrOneBooleanContent);
+
+  setMinFunctionAlignment(2);
+  setPrefFunctionAlignment(4);
+}
+
+SDValue NDS32TargetLowering::LowerOperation(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  switch (Op.getOpcode()) {
+  case ISD::GlobalAddress:  return LowerGlobalAddress(Op, DAG);
+  case ISD::BlockAddress:   return LowerBlockAddress(Op, DAG);
+  case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
+  case ISD::RETURNADDR:     return LowerRETURNADDR(Op, DAG);
+  case ISD::FRAMEADDR:      return LowerFRAMEADDR(Op, DAG);
+  case ISD::VASTART:        return LowerVASTART(Op, DAG);
+  case ISD::JumpTable:      return LowerJumpTable(Op, DAG);
+  case ISD::UDIVREM:        return LowerDIVREM(Op, DAG);
+  case ISD::SDIVREM:        return LowerDIVREM(Op, DAG);
+  default:
+    llvm_unreachable("unimplemented operation");
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// NDS32 Addressing Mode Support
+//===----------------------------------------------------------------------===//
+
+/// isLegalAddressImmediate - Return true if the integer value can be used
+/// as the offset of the target addressing mode for a load/store of the
+/// given type.
+static bool isLegalAddressImmediate(int64_t V, EVT VT,
+                                    const NDS32Subtarget *Subtarget) {
+  if (V == 0)
+    return true;
+
+  if (!VT.isSimple())
+    return false;
+
+  if (V < 0)
+    V = -V;
+
+  switch (VT.getSimpleVT().SimpleTy) {
+  default: return false;
+  case MVT::i8:
+    // imm15s
+    return V == (V & ((1LL << 15) - 1));
+  case MVT::i16:
+    // imm15s << 1
+    return (V == (V & ((1LL << 16) - 1))) && (V % 2 == 0);
+  case MVT::i32:
+    // imm15s << 2
+    return (V == (V & ((1LL << 17) - 1))) && (V % 4 == 0);
+  case MVT::f32:
+  case MVT::f64:
+    return false;
+  }
+}
+
+/// isLegalAddressingMode - Return true if the addressing mode represented
+/// by AM is legal for this target, for a load/store of the specified type.
+bool NDS32TargetLowering::isLegalAddressingMode(const DataLayout &DL,
+                                                const AddrMode &AM, Type *Ty,
+                                                unsigned AS) const {
+  EVT VT = getValueType(DL, Ty, true);
+  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
+    return false;
+
+  // Can never fold the address of a global into a load/store.
+  if (AM.BaseGV)
+    return false;
+
+  switch (AM.Scale) {
+  case 0: // no scale reg, must be "r+i" or "r", or "i".
+    break;
+  case 1:
+    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
+      return false;
+    // Otherwise we have r+r or r+i.
+    break;
+  default:
+    return false;
+  }
+  return true;
+}
+
+// Always return false to prevent SelectionDAG::FoldSymbolOffset() from
+// folding an offset into a symbol, which the NDS32 target does not support.
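+// (Illustrative example, not from the original patch: with offset folding
+// enabled, the DAG combiner would rewrite (add (globaladdr @g), 4) into
+// (globaladdr @g + 4), folding the constant into the GlobalAddressSDNode;
+// the symbol materialization done by getAddrNonPIC is not prepared to carry
+// that addend, so the fold is rejected wholesale.)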
+bool
+NDS32TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// NDS32 Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+TargetLowering::ConstraintType
+NDS32TargetLowering::getConstraintType(StringRef Constraint) const {
+  if (Constraint.size() == 1) {
+    switch (Constraint[0]) {
+    case 'r':
+      return C_RegisterClass;
+    default:
+      break;
+    }
+  }
+  return TargetLowering::getConstraintType(Constraint);
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+NDS32TargetLowering::getRegForInlineAsmConstraint(
+    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
+  if (Constraint.size() == 1) {
+    // GCC Constraint Letters
+    switch (Constraint[0]) {
+    default: break;
+    case 'r':   // GENERAL_REGS
+      return std::make_pair(0U, &NDS32::GPRRegClass);
+    }
+  }
+
+  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
+
+//===----------------------------------------------------------------------===//
+// Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+
+#include "NDS32GenCallingConv.inc"
+
+// addLiveIn - This helper function adds the specified physical register to
+// the MachineFunction as a live-in value. It also creates a corresponding
+// virtual register for it.
+static unsigned
+addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC) {
+  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
+  MF.getRegInfo().addLiveIn(PReg, VReg);
+  return VReg;
+}
+
+static const MCPhysReg NDS32ArgRegs[6] = {
+  NDS32::R0, NDS32::R1, NDS32::R2,
+  NDS32::R3, NDS32::R4, NDS32::R5
+};
+
+// RestoreVarArgRegs - Store the variadic argument registers to the stack.
+void NDS32TargetLowering::RestoreVarArgRegs(std::vector<SDValue> &OutChains,
+                                            SDValue Chain, const SDLoc &DL,
+                                            SelectionDAG &DAG,
+                                            CCState &State) const {
+  ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(NDS32ArgRegs);
+  unsigned Idx = State.getFirstUnallocated(ArgRegs);
+  unsigned RegSizeInBytes = 4;
+  MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
+  const TargetRegisterClass *RC = getRegClassFor(RegTy);
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  NDS32MachineFunctionInfo *NDS32FI = MF.getInfo<NDS32MachineFunctionInfo>();
+  // VaArgOffset is the va_start offset from the stack pointer.
+  int VaArgOffset = 0;
+
+  if (ArgRegs.size() == Idx) {
+    // All argument registers were used for passing arguments.
+    VaArgOffset = State.getNextStackOffset();
+    //
+    // ---------------- <-- va_start
+    // | outgoing args|
+    // ---------------- <-- sp
+    // | R0 ~ R5      |
+    // ----------------
+  } else {
+    VaArgOffset = -(int)(RegSizeInBytes * (ArgRegs.size() - Idx));
+    // E.g. Idx = R3, which means f(a, b, c, ...):
+    //
+    // ---------------- <-- sp
+    // | R3 ~ R5      |
+    // ---------------- <-- va_start
+    // | R0 ~ R2      |
+    // ----------------
+  }
+
+  // Record the frame index of the first variable argument, which is a value
+  // necessary for VASTART.
+  int VA_FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
+  NDS32FI->setVarArgsFrameIndex(VA_FI);
+
+  // Copy the integer registers that have not been used for argument passing
+  // to the argument register save area. The save area is allocated in the
+  // callee's stack frame.
+  // For the case above (Idx = R3), generate stores to push R3~R5 to the
+  // callee's stack frame.
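+  // (Illustrative walk-through, not from the original patch: for
+  // `int f(int a, int b, int c, ...)`, Idx is R3, so VaArgOffset starts at
+  // -12 and the loop below stores R3 at sp-12, R4 at sp-8 and R5 at sp-4;
+  // VarArgsFrameIndex then addresses sp-12, the first va_arg slot.)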
+  for (unsigned I = Idx; I < ArgRegs.size();
+       ++I, VaArgOffset += RegSizeInBytes) {
+    unsigned Reg = addLiveIn(MF, ArgRegs[I], RC);
+    SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
+    int FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
+    SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+    SDValue Store =
+        DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo());
+    cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
+        (Value *)nullptr);
+    OutChains.push_back(Store);
+  }
+}
+
+// LowerFormalArguments - Specify how the callee receives its arguments.
+SDValue NDS32TargetLowering::LowerFormalArguments(
+    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  NDS32MachineFunctionInfo *FI = MF.getInfo<NDS32MachineFunctionInfo>();
+
+  FI->setVarArgsFrameIndex(0);
+
+  // Used with vargs to accumulate store chains.
+  std::vector<SDValue> OutChains;
+
+  // Assign locations to all of the incoming arguments.
+  SmallVector<CCValAssign, 16> ArgLocs;
+
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
+
+  const Function *Func = DAG.getMachineFunction().getFunction();
+  Function::const_arg_iterator FuncArg = Func->arg_begin();
+
+  CCInfo.AnalyzeFormalArguments(Ins, CC_NDS32_SoftFloat);
+
+  unsigned CurArgIdx = 0;
+
+  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+    CCValAssign &VA = ArgLocs[i];
+    if (Ins[i].isOrigArg()) {
+      std::advance(FuncArg, Ins[i].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[i].getOrigArgIndex();
+    }
+    ISD::ArgFlagsTy Flags = Ins[i].Flags;
+
+    if (Flags.isByVal()) {
+      assert(Ins[i].isOrigArg() && "Byval arguments cannot be implicit");
+      unsigned FirstByValReg, LastByValReg;
+      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
+      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
+
+      assert(Flags.getByValSize() &&
+             "ByVal args of size 0 should have been ignored by front-end.");
+      assert(ByValIdx < CCInfo.getInRegsParamsCount());
+      copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
+                    FirstByValReg, LastByValReg, VA, CCInfo);
+      CCInfo.nextInRegsParam();
+      continue;
+    }
+
+    bool IsRegLoc = VA.isRegLoc();
+    // Arguments passed in registers.
+    if (IsRegLoc) {
+      MVT RegVT = VA.getLocVT();
+      unsigned ArgReg = VA.getLocReg();
+      const TargetRegisterClass *RC = getRegClassFor(RegVT);
+
+      // Transform the arguments passed on physical registers into
+      // virtual ones.
+      unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
+      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
+
+      InVals.push_back(ArgValue);
+    } else { // !VA.isRegLoc()
+      MVT LocVT = VA.getLocVT();
+
+      // Sanity check: arguments not passed in registers must be in memory.
+      assert(VA.isMemLoc());
+
+      // The stack pointer offset is relative to the caller's stack frame.
+      int FrameIdx = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
+                                           VA.getLocMemOffset(), true);
+
+      // Create load nodes to retrieve arguments from the stack.
+      SDValue FIN = DAG.getFrameIndex(FrameIdx,
+                                      getPointerTy(DAG.getDataLayout()));
+      SDValue ArgValue = DAG.getLoad(
+          LocVT, DL, Chain, FIN,
+          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
+                                            FrameIdx));
+      OutChains.push_back(ArgValue.getValue(1));
+
+      InVals.push_back(ArgValue);
+    }
+  }
+
+  // Push the VarArg registers to the stack.
+  if (IsVarArg)
+    RestoreVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
+
+  // All stores are grouped in one node to allow the matching between the
+  // size of Ins and InVals. This only happens for vararg functions.
+  if (!OutChains.empty()) {
+    OutChains.push_back(Chain);
+    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
+  }
+
+  return Chain;
+}
+
+/// LowerMemOpCallTo - Store the argument to the stack.
+SDValue NDS32TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
+                                              SDValue Arg, const SDLoc &dl,
+                                              SelectionDAG &DAG,
+                                              const CCValAssign &VA,
+                                              ISD::ArgFlagsTy Flags) const {
+  unsigned LocMemOffset = VA.getLocMemOffset();
+  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
+  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
+                       StackPtr, PtrOff);
+  return DAG.getStore(
+      Chain, dl, Arg, PtrOff,
+      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
+}
+
+void NDS32TargetLowering::copyByValRegs(
+    SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
+    SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
+    SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
+    unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
+    CCState &State) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  unsigned GPRSizeInBytes = 4;
+  unsigned NumRegs = LastReg - FirstReg;
+  unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
+  unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
+  int FrameObjOffset;
+  ArrayRef<MCPhysReg> ByValArgRegs = makeArrayRef(NDS32ArgRegs);
+
+  if (RegAreaSize)
+    FrameObjOffset =
+        -(int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
+  else
+    FrameObjOffset = VA.getLocMemOffset();
+
+  // Create the frame object.
+  EVT PtrTy = getPointerTy(DAG.getDataLayout());
+  int FI = MFI.CreateFixedObject(FrameObjSize, FrameObjOffset, true);
+  SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
+  InVals.push_back(FIN);
+
+  if (!NumRegs)
+    return;
+
+  // Copy the argument registers.
+  MVT RegTy = MVT::getIntegerVT(GPRSizeInBytes * 8);
+  const TargetRegisterClass *RC = getRegClassFor(RegTy);
+
+  for (unsigned I = 0; I < NumRegs; ++I) {
+    unsigned ArgReg = ByValArgRegs[FirstReg + I];
+    unsigned VReg = addLiveIn(MF, ArgReg, RC);
+    unsigned Offset = I * GPRSizeInBytes;
+    SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
+                                   DAG.getConstant(Offset, DL, PtrTy));
+    SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
+                                 StorePtr,
+                                 MachinePointerInfo(FuncArg, Offset));
+    OutChains.push_back(Store);
+  }
+}
+
+// passByValArg - Copy a byval argument to registers and/or the stack.
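+// (Illustrative walk-through, not from the original patch: for an 11-byte
+// byval struct fully covered by registers, the word loop below issues two
+// i32 loads for bytes 0-7, then the leftover path zero-extends an i16 load
+// of bytes 8-9 and an i8 load of byte 10 shifted left by 16, OR'ing them
+// into the last register value.)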
+void NDS32TargetLowering::passByValArg(
+    SDValue Chain, const SDLoc &DL, RegsToPassVector &RegsToPass,
+    SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
+    MachineFrameInfo &MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
+    unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,
+    const CCValAssign &VA) const {
+  unsigned ByValSizeInBytes = Flags.getByValSize();
+  unsigned OffsetInBytes = 0; // From beginning of struct.
+  unsigned RegSizeInBytes = 4;
+  unsigned Alignment = std::min(Flags.getByValAlign(), RegSizeInBytes);
+  EVT PtrTy = getPointerTy(DAG.getDataLayout()),
+      RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
+  unsigned NumRegs = LastReg - FirstReg;
+
+  if (NumRegs) {
+    ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(NDS32ArgRegs);
+    bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
+    unsigned I = 0;
+
+    // Copy whole words to registers.
+    for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
+      SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
+                                    DAG.getConstant(OffsetInBytes, DL, PtrTy));
+      SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
+                                    MachinePointerInfo(), Alignment);
+      MemOpChains.push_back(LoadVal.getValue(1));
+      unsigned ArgReg = ArgRegs[FirstReg + I];
+      RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
+    }
+
+    // Return if the struct has been fully copied.
+    if (ByValSizeInBytes == OffsetInBytes)
+      return;
+
+    // Copy the remainder of the byval argument with sub-word loads and
+    // shifts.
+    if (LeftoverBytes) {
+      SDValue Val;
+
+      for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
+           OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
+        unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
+
+        if (RemainingSizeInBytes < LoadSizeInBytes)
+          continue;
+
+        // Load the subword.
+        SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
+                                      DAG.getConstant(OffsetInBytes, DL,
+                                                      PtrTy));
+        SDValue LoadVal = DAG.getExtLoad(
+            ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
+            MVT::getIntegerVT(LoadSizeInBytes * 8), Alignment);
+        MemOpChains.push_back(LoadVal.getValue(1));
+
+        // Shift the loaded value into place.
+        unsigned Shamt;
+
+        if (isLittle)
+          Shamt = TotalBytesLoaded * 8;
+        else
+          Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
+
+        SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
+                                    DAG.getConstant(Shamt, DL, MVT::i32));
+
+        if (Val.getNode())
+          Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift);
+        else
+          Val = Shift;
+
+        OffsetInBytes += LoadSizeInBytes;
+        TotalBytesLoaded += LoadSizeInBytes;
+        Alignment = std::min(Alignment, LoadSizeInBytes);
+      }
+
+      unsigned ArgReg = ArgRegs[FirstReg + I];
+      RegsToPass.push_back(std::make_pair(ArgReg, Val));
+      return;
+    }
+  }
+
+  // Copy the remainder of the byval argument to the stack with memcpy.
+  unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
+  SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
+                            DAG.getConstant(OffsetInBytes, DL, PtrTy));
+  SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
+                            DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
+  Chain = DAG.getMemcpy(Chain, DL, Dst, Src,
+                        DAG.getConstant(MemCpySize, DL, PtrTy),
+                        Alignment, /*isVolatile=*/false,
+                        /*AlwaysInline=*/false, /*isTailCall=*/false,
+                        MachinePointerInfo(), MachinePointerInfo());
+  MemOpChains.push_back(Chain);
+}
+
+void NDS32TargetLowering::HandleByVal(CCState *State, unsigned &Size,
+                                      unsigned Align) const {
+  const TargetFrameLowering *TFL = Subtarget->getFrameLowering();
+
+  assert(Size && "Byval argument's size shouldn't be 0.");
+
+  Align = std::min(Align, TFL->getStackAlignment());
+
+  unsigned FirstReg = 0;
+  unsigned NumRegs = 0;
+
+  if (State->getCallingConv() != CallingConv::Fast) {
+    unsigned RegSizeInBytes = 4;
+    ArrayRef<MCPhysReg> IntArgRegs = makeArrayRef(NDS32ArgRegs);
+    const MCPhysReg *ShadowRegs = IntArgRegs.data();
+
+    // We used to check the size as well but we can't do that anymore since
+    // CCState::HandleByVal() rounds up the size after calling this function.
+    assert(!(Align % RegSizeInBytes) &&
+           "Byval argument's alignment should be a multiple of "
+           "RegSizeInBytes.");
+
+    FirstReg = State->getFirstUnallocated(IntArgRegs);
+
+    // If Align > RegSizeInBytes, the first arg register must be even.
+    if ((Align > RegSizeInBytes) && (FirstReg % 2)) {
+      State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
+      ++FirstReg;
+    }
+
+    // Mark the registers allocated.
+    Size = alignTo(Size, RegSizeInBytes);
+    for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
+         Size -= RegSizeInBytes, ++I, ++NumRegs)
+      State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
+  }
+
+  State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
+}
+
+// LowerCall - Specify how the caller passes its arguments.
+SDValue
+NDS32TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+                               SmallVectorImpl<SDValue> &InVals) const {
+  SelectionDAG &DAG = CLI.DAG;
+  SDLoc &DL = CLI.DL;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+  SDValue Chain = CLI.Chain;
+  SDValue Callee = CLI.Callee;
+  bool &IsTailCall = CLI.IsTailCall;
+  CallingConv::ID CallConv = CLI.CallConv;
+  bool IsVarArg = CLI.IsVarArg;
+
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+
+  // The NDS32 target does not support tail call optimization yet.
+  IsTailCall = false;
+
+  const TargetFrameLowering *TFL = Subtarget->getFrameLowering();
+
+  // Analyze the operands of the call, assigning locations to each operand.
+  SmallVector<CCValAssign, 16> ArgLocs;
+
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
+
+  CCInfo.AnalyzeCallOperands(Outs, CC_NDS32);
+
+  // Get a count of how many bytes are to be pushed on the stack.
+  unsigned NextStackOffset = CCInfo.getNextStackOffset();
+
+  // Round the amount of outgoing stack space up to the stack alignment.
+  unsigned StackAlignment = TFL->getStackAlignment();
+  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
+  SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, DL, true);
+
+  if (!IsTailCall)
+    Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal, DL);
+
+  RegsToPassVector RegsToPass;
+  SmallVector<SDValue, 8> MemOpChains;
+
+  SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, NDS32::SP,
+                                        getPointerTy(DAG.getDataLayout()));
+
+  // Walk the register/memloc assignments, inserting copies/loads.
+  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+    SDValue Arg = OutVals[i];
+    CCValAssign &VA = ArgLocs[i];
+    ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+    // ByVal argument.
+    if (Flags.isByVal()) {
+      unsigned FirstByValReg, LastByValReg;
+      unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
+      CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
+
+      assert(Flags.getByValSize() &&
+             "ByVal args of size 0 should have been ignored by front-end.");
+      assert(ByValIdx < CCInfo.getInRegsParamsCount());
+      assert(!IsTailCall &&
+             "Do not tail-call optimize if there is a byval argument.");
+      passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG,
+                   Arg, FirstByValReg, LastByValReg, Flags, true, VA);
+      CCInfo.nextInRegsParam();
+      continue;
+    }
+
+    // Promote the value if needed.
+    switch (VA.getLocInfo()) {
+    default: llvm_unreachable("Unknown loc info!");
+    case CCValAssign::Full: break;
+    case CCValAssign::SExt:
+      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
+      break;
+    case CCValAssign::ZExt:
+      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
+      break;
+    case CCValAssign::AExt:
+      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
+      break;
+    case CCValAssign::BCvt:
+      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
+      break;
+    }
+
+    if (VA.isRegLoc()) {
+      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+    } else {
+      assert(VA.isMemLoc());
+      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
+                                             DL, DAG, VA, Flags));
+    }
+  }
+
+  if (!MemOpChains.empty())
+    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
+
+  // Build a sequence of copy-to-reg nodes chained together with token chain
+  // and flag operands which copy the outgoing args into registers. The
+  // InFlag is necessary since all emitted instructions must be stuck
+  // together.
+  SDValue InFlag;
+  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
+                             RegsToPass[i].second, InFlag);
+    InFlag = Chain.getValue(1);
+  }
+
+  // If the callee is a GlobalAddress node (quite common, every direct call
+  // is), turn it into a TargetGlobalAddress node so that legalize doesn't
+  // hack it. Likewise ExternalSymbol -> TargetExternalSymbol.
+  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, MVT::i32);
+  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
+    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
+
+  std::vector<SDValue> Ops;
+  Ops.push_back(Chain);
+  Ops.push_back(Callee);
+
+  // Add argument registers to the end of the list so that they are
+  // known live into the call.
+  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+                                  RegsToPass[i].second.getValueType()));
+
+  // Add a register mask operand representing the call-preserved
+  // (callee-saved) registers, so the register allocator can compute correct
+  // register liveness across the call.
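+  // (Illustrative note, not from the original patch: the mask is a bit
+  // vector over all physical registers in which set bits mark registers
+  // preserved by the callee; anything not set is treated as clobbered at
+  // the call site.)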
+  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
+  const uint32_t *Mask =
+      TRI->getCallPreservedMask(DAG.getMachineFunction(), CLI.CallConv);
+  assert(Mask && "Missing call preserved mask for calling convention");
+  Ops.push_back(DAG.getRegisterMask(Mask));
+
+  if (InFlag.getNode())
+    Ops.push_back(InFlag);
+
+  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+  Chain = DAG.getNode(NDS32ISD::CALL, DL, NodeTys, Ops);
+  InFlag = Chain.getValue(1);
+
+  // Create the CALLSEQ_END node.
+  Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal,
+                             DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
+  InFlag = Chain.getValue(1);
+
+  // Handle result values, copying them out of physregs into vregs that we
+  // return.
+  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL,
+                         DAG, InVals);
+}
+
+// LowerReturn - Specify how the callee returns its values.
+SDValue
+NDS32TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+                                 bool isVarArg,
+                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
+                                 const SmallVectorImpl<SDValue> &OutVals,
+                                 const SDLoc &dl, SelectionDAG &DAG) const {
+
+  // CCValAssign - represents the assignment of a return value to a location.
+  SmallVector<CCValAssign, 16> RVLocs;
+
+  // CCState - Info about the registers and stack slots.
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
+
+  // Analyze return values.
+  CCInfo.AnalyzeReturn(Outs, RetCC_NDS32);
+
+  SDValue Flag;
+  SmallVector<SDValue, 4> RetOps(1, Chain);
+
+  // Copy the result values into the output registers.
+  for (unsigned i = 0; i != RVLocs.size(); ++i) {
+    CCValAssign &VA = RVLocs[i];
+    assert(VA.isRegLoc() && "Can only return in registers!");
+
+    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
+                             OutVals[i], Flag);
+
+    // Guarantee that all emitted copies are stuck together,
+    // avoiding something bad.
+    Flag = Chain.getValue(1);
+    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+  }
+
+  unsigned Opc = NDS32ISD::RET;
+
+  RetOps[0] = Chain; // Update chain.
+
+  // Add the flag if we have it.
+  if (Flag.getNode())
+    RetOps.push_back(Flag);
+
+  return DAG.getNode(Opc, dl, MVT::Other, RetOps);
+}
+
+/// LowerCallResult - Lower the result values of a call into the appropriate
+/// copies out of physical registers. Specify how the caller reads the
+/// return value.
+SDValue NDS32TargetLowering::LowerCallResult(
+    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
+    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+
+  // Assign locations to each value returned by this call.
+  SmallVector<CCValAssign, 16> RVLocs;
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
+
+  CCInfo.AnalyzeCallResult(Ins, RetCC_NDS32);
+
+  // Copy all of the result registers out of their specified physregs.
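+  // (Illustrative note, not from the original patch: each glued CopyFromReg
+  // produces (value, chain, glue); getValue(1) advances the chain,
+  // getValue(2) threads the glue into the next copy, and getValue(0) is the
+  // returned value itself.)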
+  for (unsigned i = 0; i != RVLocs.size(); ++i) {
+    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
+                               RVLocs[i].getValVT(), InFlag).getValue(1);
+    InFlag = Chain.getValue(2);
+    InVals.push_back(Chain.getValue(0));
+  }
+
+  return Chain;
+}
+
+SDValue NDS32TargetLowering::LowerGlobalAddress(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  EVT Ty = Op.getValueType();
+  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
+
+  return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
+}
+
+SDValue NDS32TargetLowering::LowerJumpTable(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
+  EVT Ty = Op.getValueType();
+
+  return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
+}
+
+SDValue NDS32TargetLowering::LowerExternalSymbol(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  SDLoc dl(Op);
+  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
+  auto PtrVT = getPointerTy(DAG.getDataLayout());
+  SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT);
+
+  return DAG.getNode(NDS32ISD::Wrapper, dl, PtrVT, Result);
+}
+
+SDValue NDS32TargetLowering::LowerBlockAddress(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
+  EVT Ty = Op.getValueType();
+
+  return getAddrNonPIC(N, SDLoc(N), Ty, DAG);
+}
+
+SDValue NDS32TargetLowering::LowerDIVREM(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc dl(Op);
+  EVT VT = Op.getValueType();
+  SDVTList VTs = DAG.getVTList(VT, VT);
+  SDValue Ra = Op.getOperand(0);
+  SDValue Rb = Op.getOperand(1);
+  unsigned Opc = (Op->getOpcode() == ISD::SDIVREM)
+                     ? NDS32ISD::SDIVREM : NDS32ISD::UDIVREM;
+  return DAG.getNode(Opc, dl, VTs, Ra, Rb);
+}
+
+SDValue
+NDS32TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  NDS32MachineFunctionInfo *FuncInfo = MF.getInfo<NDS32MachineFunctionInfo>();
+  int ReturnAddrIndex = FuncInfo->getRAIndex();
+  auto PtrVT = getPointerTy(MF.getDataLayout());
+
+  if (ReturnAddrIndex == 0) {
+    // Set up a frame object for the return address.
+    uint64_t SlotSize = MF.getDataLayout().getPointerSize();
+    ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize, -SlotSize,
+                                                          true);
+    FuncInfo->setRAIndex(ReturnAddrIndex);
+  }
+
+  return DAG.getFrameIndex(ReturnAddrIndex, PtrVT);
+}
+
+SDValue NDS32TargetLowering::LowerRETURNADDR(SDValue Op,
+                                             SelectionDAG &DAG) const {
+  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+  MFI.setReturnAddressIsTaken(true);
+
+  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
+    return SDValue();
+
+  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+  SDLoc dl(Op);
+  auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+  if (Depth > 0) {
+    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
+    SDValue Offset =
+        DAG.getConstant(DAG.getDataLayout().getPointerSize(), dl, MVT::i32);
+    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
+                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
+                       MachinePointerInfo());
+  }
+
+  // Just load the return address.
+  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
+  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
+                     MachinePointerInfo());
+}
+
+SDValue NDS32TargetLowering::LowerFRAMEADDR(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+  MFI.setFrameAddressIsTaken(true);
+
+  EVT VT = Op.getValueType();
+  SDLoc dl(Op);
+  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+                                         NDS32::FP, VT);
+  while (Depth--)
+    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
+                            MachinePointerInfo());
+  return FrameAddr;
+}
+
+SDValue NDS32TargetLowering::LowerVASTART(SDValue Op,
+                                          SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  NDS32MachineFunctionInfo *FuncInfo = MF.getInfo<NDS32MachineFunctionInfo>();
+  auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+  // Frame index of the first vararg argument.
+  SDValue FrameIndex =
+      DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
+  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+
+  // Create a store of the frame index to the location operand.
+  return DAG.getStore(Op.getOperand(0), SDLoc(Op), FrameIndex,
+                      Op.getOperand(1), MachinePointerInfo(SV));
+}
+
+static bool getNDS32IndexedAddressParts(SDNode *Ptr, EVT VT, SDValue &Base,
+                                        SDValue &Offset, SelectionDAG &DAG) {
+  if (Ptr->getOpcode() != ISD::ADD)
+    return false;
+
+  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1));
+  if (!RHS)
+    return false;
+
+  // We don't have to check the immediate limitation here.
+  // If ([reg], imm) can't satisfy the immediate limitation,
+  // it can still match the ([reg], reg) addressing mode.
+  Base = Ptr->getOperand(0);
+  Offset = Ptr->getOperand(1);
+
+  return true;
+}
+
+bool NDS32TargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
+                                                     SDValue &Base,
+                                                     SDValue &Offset,
+                                                     ISD::MemIndexedMode &AM,
+                                                     SelectionDAG &DAG) const {
+  EVT VT;
+  SDValue Ptr;
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+    VT = LD->getMemoryVT();
+    Ptr = LD->getBasePtr();
+  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+    VT = ST->getMemoryVT();
+    Ptr = ST->getBasePtr();
+  } else
+    return false;
+
+  bool isLegal = getNDS32IndexedAddressParts(Op, VT, Base, Offset, DAG);
+  if (!isLegal)
+    return false;
+
+  AM = ISD::POST_INC;
+  return true;
+}
+
+SDValue NDS32TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
+                                           SelectionDAG &DAG,
+                                           unsigned Flag) const {
+  return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
+}
+
+SDValue NDS32TargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
+                                           SelectionDAG &DAG,
+                                           unsigned Flag) const {
+  return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
+}
+
+SDValue NDS32TargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
+                                           SelectionDAG &DAG,
+                                           unsigned Flag) const {
+  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
+}
+
+SDValue NDS32TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
+                                           SelectionDAG &DAG,
+                                           unsigned Flag) const {
+  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
+}
+
+SDValue NDS32TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
+                                           SelectionDAG &DAG,
+                                           unsigned Flag) const {
+  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
+                                   N->getOffset(), Flag);
+}
+
+const char *NDS32TargetLowering::getTargetNodeName(unsigned Opcode) const {
+  switch ((NDS32ISD::NodeType)Opcode) {
+  case NDS32ISD::FIRST_NUMBER: break;
+  case NDS32ISD::CALL:     return "NDS32ISD::CALL";
return "NDS32ISD::CALL"; + case NDS32ISD::RET: return "NDS32ISD::RET"; + case NDS32ISD::IRET: return "NDS32ISD::IRET"; + case NDS32ISD::Hi20: return "NDS32ISD::Hi20"; + case NDS32ISD::Lo12: return "NDS32ISD::Lo12"; + case NDS32ISD::MEMCPY: return "NDS32ISD::MEMCPY"; + default: + break; + } + return nullptr; +} + + +//===----------------------------------------------------------------------===// +// Other Lowering Code +//===----------------------------------------------------------------------===// + +static void attachMEMCPYScratchRegs(const NDS32Subtarget *Subtarget, + MachineInstr &MI, const SDNode *Node) { + DebugLoc DL = MI.getDebugLoc(); + MachineFunction *MF = MI.getParent()->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + MachineInstrBuilder MIB(*MF, MI); + + // If the new dst/src is unused mark it as dead. + if (!Node->hasAnyUseOfValue(0)) { + MI.getOperand(0).setIsDead(true); + } + if (!Node->hasAnyUseOfValue(1)) { + MI.getOperand(1).setIsDead(true); + } + + // The MEMCPY both defines and kills the scratch registers. + for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) { + unsigned TmpReg = MRI.createVirtualRegister(&NDS32::GPRRegClass); + MIB.addReg(TmpReg, RegState::Define|RegState::Dead); + } +} + +void NDS32TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI, + SDNode *Node) const { + if (MI.getOpcode() == NDS32::MEMCPY) { + attachMEMCPYScratchRegs(Subtarget, MI, Node); + return; + } +}