Index: lib/Target/AMDGPU/AMDGPUISelLowering.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -73,6 +73,8 @@
   SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
 
+  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+
 protected:
   bool shouldCombineMemoryType(EVT VT) const;
   SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -26,6 +26,7 @@
 #include "Utils/AMDGPUBaseInfo.h"
 #include "R600MachineFunctionInfo.h"
 #include "SIInstrInfo.h"
+#include "SIRegisterInfo.h"
 #include "SIMachineFunctionInfo.h"
 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
 #include "llvm/CodeGen/Analysis.h"
Index: lib/Target/AMDGPU/SIISelLowering.h
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.h
+++ lib/Target/AMDGPU/SIISelLowering.h
@@ -89,7 +89,7 @@
   SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
-
+  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
   SDValue adjustLoadValueType(unsigned Opcode, MemSDNode *M,
                               SelectionDAG &DAG, ArrayRef<SDValue> Ops,
                               bool IsIntrinsic = false) const;
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3693,6 +3693,7 @@
   switch (Op.getOpcode()) {
   default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
   case ISD::BRCOND: return LowerBRCOND(Op, DAG);
+  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
   case ISD::LOAD: {
     SDValue Result = LowerLOAD(Op, DAG);
     assert((!Result.getNode() ||
@@ -4151,6 +4152,28 @@
   return Chain;
 }
 
+SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
+                                          SelectionDAG &DAG) const {
+  // Checking the depth
+  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
+    DAG.getContext()->emitError(
+        "Return address can be determined only for current frame");
+    return SDValue();
+  }
+
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  // There is a call to @llvm.returnaddress in this function
+  MFI.setReturnAddressIsTaken(true);
+
+  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
+  MVT VT = Op.getSimpleValueType();
+  // Get the return address reg and mark it as an implicit live-in
+  unsigned Reg = MF.addLiveIn(TRI->getReturnAddressReg(MF), getRegClassFor(VT));
+
+  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
+}
+
 SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
                                             SDValue Op,
                                             const SDLoc &DL,
Index: test/CodeGen/AMDGPU/returnaddress.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/returnaddress.ll
@@ -0,0 +1,19 @@
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck --check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}func1
+; GCN: v_mov_b32_e32 v0, s30
+; GCN: v_mov_b32_e32 v1, s31
+; GCN: s_setpc_b64 s[30:31]
+define i8* @func1() nounwind {
+entry:
+  %0 = tail call i8* @llvm.returnaddress(i32 0)
+  ret i8* %0
+}
+
+declare i8* @llvm.returnaddress(i32) nounwind readnone
+
+define i8* @func2() nounwind {
+entry:
+  %0 = tail call i8* @llvm.returnaddress(i32 1)
+  ret i8* %0
+}
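
Note (editorial, not part of the patch): a minimal sketch of how this lowering is normally reached from source code. Clang lowers __builtin_return_address(0) to the @llvm.returnaddress intrinsic, which the new SITargetLowering::LowerRETURNADDR hook then turns into a copy from the return-address SGPR pair in a callable function. The file name and the exact clang invocation below are illustrative assumptions, not taken from the patch.

/* Illustrative only; compile with something like:
 *   clang --target=amdgcn-amd-amdhsa -mcpu=gfx900 -O2 -S ra.c
 */
void *get_ra(void) {
  /* Roughly lowers to: call i8* @llvm.returnaddress(i32 0) */
  /* Only depth 0 is supported; nonzero depths emit an error per this patch. */
  return __builtin_return_address(0);
}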