Index: llvm/trunk/include/llvm/CodeGen/TargetSubtargetInfo.h
===================================================================
--- llvm/trunk/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ llvm/trunk/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -248,6 +248,9 @@
   /// Returns string representation of scheduler comment
   std::string getSchedInfoStr(const MachineInstr &MI) const override;
   std::string getSchedInfoStr(MCInst const &MCI) const override;
+
+  /// This is called after a .mir file was loaded.
+  virtual void mirFileLoaded(MachineFunction &MF) const;
 };
 
 } // end namespace llvm
Index: llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp
+++ llvm/trunk/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -417,6 +417,8 @@
 
   computeFunctionProperties(MF);
 
+  MF.getSubtarget().mirFileLoaded(MF);
+
   MF.verify();
   return false;
 }
Index: llvm/trunk/lib/CodeGen/TargetSubtargetInfo.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/TargetSubtargetInfo.cpp
+++ llvm/trunk/lib/CodeGen/TargetSubtargetInfo.cpp
@@ -111,3 +111,6 @@
       TSchedModel.computeInstrRThroughput(MCI.getOpcode());
   return createSchedInfoStr(Latency, RThroughput);
 }
+
+void TargetSubtargetInfo::mirFileLoaded(MachineFunction &MF) const {
+}
Index: llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -142,6 +142,12 @@
 
 STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
 
+/// This is the biggest offset to the stack pointer we can encode in aarch64
+/// instructions (without using a separate calculation and a temp register).
+/// Note that the exceptions here are vector stores/loads which cannot encode any
+/// displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()).
+static const unsigned DefaultSafeSPDisplacement = 255;
+
 /// Look at each instruction that references stack frames and return the stack
 /// size limit beyond which some of these instructions will require a scratch
 /// register during their expansion later.
@@ -167,7 +173,7 @@
       }
     }
   }
-  return 255;
+  return DefaultSafeSPDisplacement;
 }
 
 bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
@@ -191,11 +197,25 @@
   const MachineFrameInfo &MFI = MF.getFrameInfo();
   const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
   // Retain behavior of always omitting the FP for leaf functions when possible.
-  return (MFI.hasCalls() &&
-          MF.getTarget().Options.DisableFramePointerElim(MF)) ||
-         MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
-         MFI.hasStackMap() || MFI.hasPatchPoint() ||
-         RegInfo->needsStackRealignment(MF);
+  if (MFI.hasCalls() && MF.getTarget().Options.DisableFramePointerElim(MF))
+    return true;
+  if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
+      MFI.hasStackMap() || MFI.hasPatchPoint() ||
+      RegInfo->needsStackRealignment(MF))
+    return true;
+  // With large callframes around we may need to use FP to access the scavenging
+  // emergency spillslot.
+  //
+  // Unfortunately some calls to hasFP() like machine verifier ->
+  // getReservedReg() -> hasFP in the middle of global isel are too early
+  // to know the max call frame size. Hopefully conservatively returning "true"
+  // in those cases is fine.
+  // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs.
+  if (!MFI.isMaxCallFrameSizeComputed() ||
+      MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement)
+    return true;
+
+  return false;
 }
 
 /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
Index: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.h
@@ -647,6 +647,8 @@
                           SelectionDAG &DAG) const override;
 
   bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
+
+  void finalizeLowering(MachineFunction &MF) const override;
 };
 
 namespace AArch64 {
Index: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10974,3 +10974,8 @@
 
   return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
 }
+
+void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
+  MF.getFrameInfo().computeMaxCallFrameSize(MF);
+  TargetLoweringBase::finalizeLowering(MF);
+}
Index: llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -225,11 +225,13 @@
 
 bool
 AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const {
-  const MachineFrameInfo &MFI = MF.getFrameInfo();
-  // AArch64FrameLowering::resolveFrameIndexReference() can always fall back
-  // to the stack pointer, so only put the emergency spill slot next to the
-  // FP when there's no better way to access it (SP or base pointer).
-  return MFI.hasVarSizedObjects() && !hasBasePointer(MF);
+  // This function indicates whether the emergency spillslot should be placed
+  // close to the beginning of the stackframe (closer to FP) or the end
+  // (closer to SP).
+  //
+  // The beginning works most reliably if we have a frame pointer.
+  const AArch64FrameLowering &TFI = *getFrameLowering(MF);
+  return TFI.hasFP(MF);
 }
 
 bool AArch64RegisterInfo::requiresFrameIndexScavenging(
Index: llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h
+++ llvm/trunk/lib/Target/AArch64/AArch64Subtarget.h
@@ -326,6 +326,8 @@
       return false;
     }
   }
+
+  void mirFileLoaded(MachineFunction &MF) const override;
 };
 } // End llvm namespace
 
Index: llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -250,3 +250,13 @@
 AArch64Subtarget::getCustomPBQPConstraints() const {
   return balanceFPOps() ? llvm::make_unique<A57PBQPConstraints>() : nullptr;
 }
+
+void AArch64Subtarget::mirFileLoaded(MachineFunction &MF) const {
+  // We usually compute max call frame size after ISel. Do the computation now
+  // if the .mir file didn't specify it. Note that this will probably give you
+  // bogus values after PEI has eliminated the callframe setup/destroy pseudo
+  // instructions; specify it explicitly if you need it to be correct.
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  if (!MFI.isMaxCallFrameSizeComputed())
+    MFI.computeMaxCallFrameSize(MF);
+}
Index: llvm/trunk/test/CodeGen/AArch64/big-callframe.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/big-callframe.ll
+++ llvm/trunk/test/CodeGen/AArch64/big-callframe.ll
@@ -0,0 +1,15 @@
+; RUN: llc -o - %s | FileCheck %s
+; Make sure we use a frame pointer and fp relative addressing for the emergency
+; spillslot when we have gigantic callframes.
+; CHECK-LABEL: func:
+; CHECK: stur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Spill
+; CHECK: ldur {{.*}}, [x29, #{{.*}}] // 8-byte Folded Reload
+target triple = "aarch64--"
+declare void @extfunc([4096 x i64]* byval %p)
+define void @func([4096 x i64]* %z) {
+  %lvar = alloca [31 x i8]
+  %v = load volatile [31 x i8], [31 x i8]* %lvar
+  store volatile [31 x i8] %v, [31 x i8]* %lvar
+  call void @extfunc([4096 x i64]* byval %z)
+  ret void
+}