diff --git a/llvm/include/llvm/CodeGen/CallingConvLower.h b/llvm/include/llvm/CodeGen/CallingConvLower.h
--- a/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -175,7 +175,7 @@
   SmallVectorImpl<CCValAssign> &Locs;
   LLVMContext &Context;
 
-  unsigned StackOffset;
+  unsigned StackSize;
   Align MaxStackArgAlign;
   SmallVector<uint32_t, 16> UsedRegs;
   SmallVector<CCValAssign, 4> PendingLocs;
@@ -236,17 +236,14 @@
   CallingConv::ID getCallingConv() const { return CallingConv; }
   bool isVarArg() const { return IsVarArg; }
 
-  /// getNextStackOffset - Return the next stack offset such that all stack
-  /// slots satisfy their alignment requirements.
-  unsigned getNextStackOffset() const {
-    return StackOffset;
-  }
+  /// Returns the size of the currently allocated portion of the stack.
+  unsigned getStackSize() const { return StackSize; }
 
   /// getAlignedCallFrameSize - Return the size of the call frame needed to
   /// be able to store all arguments and such that the alignment requirement
   /// of each of the arguments is satisfied.
   unsigned getAlignedCallFrameSize() const {
-    return alignTo(StackOffset, MaxStackArgAlign);
+    return alignTo(StackSize, MaxStackArgAlign);
   }
 
   /// isAllocated - Return true if the specified register (or an alias) is
@@ -400,9 +397,9 @@
   /// AllocateStack - Allocate a chunk of stack space with the specified size
   /// and alignment.
   unsigned AllocateStack(unsigned Size, Align Alignment) {
-    StackOffset = alignTo(StackOffset, Alignment);
-    unsigned Result = StackOffset;
-    StackOffset += Size;
+    StackSize = alignTo(StackSize, Alignment);
+    unsigned Result = StackSize;
+    StackSize += Size;
     MaxStackArgAlign = std::max(Alignment, MaxStackArgAlign);
     ensureMaxAlignment(Alignment);
     return Result;
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -188,7 +188,7 @@
       if (getAssignFn(State.isVarArg())(ValNo, ValVT, LocVT, LocInfo, Flags,
                                         State))
         return true;
-      StackOffset = State.getNextStackOffset();
+      StackSize = State.getStackSize();
       return false;
     }
@@ -199,9 +199,8 @@
     /// as AssignFn on most targets.
     CCAssignFn *AssignFnVarArg;
 
-    /// Stack offset for next argument. At the end of argument evaluation, this
-    /// is typically the total stack size.
-    uint64_t StackOffset = 0;
+    /// The size of the currently allocated portion of the stack.
+    uint64_t StackSize = 0;
 
     /// Select the appropriate assignment function depending on whether this is
     /// a variadic call.
diff --git a/llvm/lib/CodeGen/CallingConvLower.cpp b/llvm/lib/CodeGen/CallingConvLower.cpp
--- a/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -30,7 +30,7 @@
     : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
       TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C) {
   // No stack is used.
-  StackOffset = 0;
+  StackSize = 0;
 
   clearByValRegsInfo();
   UsedRegs.resize((TRI.getNumRegs()+31)/32);
@@ -197,7 +197,7 @@
 void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                           MVT VT, CCAssignFn Fn) {
-  unsigned SavedStackOffset = StackOffset;
+  unsigned SavedStackSize = StackSize;
   Align SavedMaxStackArgAlign = MaxStackArgAlign;
   unsigned NumLocs = Locs.size();
@@ -229,7 +229,7 @@
   // Clear the assigned values and stack memory. We leave the registers marked
   // as allocated so that future queries don't return the same registers, i.e.
   // when i64 and f64 are both passed in GPRs.
-  StackOffset = SavedStackOffset;
+  StackSize = SavedStackSize;
   MaxStackArgAlign = SavedMaxStackArgAlign;
   Locs.truncate(NumLocs);
 }
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -3022,7 +3022,7 @@
   CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
 
   // Get a count of how many bytes are to be pushed on the stack.
-  NumBytes = CCInfo.getNextStackOffset();
+  NumBytes = CCInfo.getStackSize();
 
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6515,11 +6515,12 @@
   }
 
   // This will point to the next argument passed via stack.
-  unsigned StackOffset = CCInfo.getNextStackOffset();
+  unsigned VarArgsOffset = CCInfo.getStackSize();
   // We currently pass all varargs at 8-byte alignment, or 4 for ILP32
-  StackOffset = alignTo(StackOffset, Subtarget->isTargetILP32() ? 4 : 8);
-  FuncInfo->setVarArgsStackOffset(StackOffset);
-  FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
+  VarArgsOffset = alignTo(VarArgsOffset, Subtarget->isTargetILP32() ? 4 : 8);
+  FuncInfo->setVarArgsStackOffset(VarArgsOffset);
+  FuncInfo->setVarArgsStackIndex(
+      MFI.CreateFixedObject(4, VarArgsOffset, true));
 
   if (MFI.hasMustTailInVarArgFunc()) {
     SmallVector<MVT, 2> RegParmTypes;
@@ -6559,7 +6560,7 @@
     }
   }
 
-  unsigned StackArgSize = CCInfo.getNextStackOffset();
+  unsigned StackArgSize = CCInfo.getStackSize();
   bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
   if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
     // This is a non-standard ABI so by fiat I say we're allowed to make full
@@ -6956,7 +6957,7 @@
   // If the stack arguments for this call do not fit into our own save area then
   // the call cannot be made tail.
-  if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
+  if (CCInfo.getStackSize() > FuncInfo->getBytesInStackArgArea())
     return false;
 
   const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -7120,7 +7121,7 @@
          "site marked musttail");
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = CCInfo.getNextStackOffset();
+  unsigned NumBytes = CCInfo.getStackSize();
 
   if (IsSibCall) {
     // Since we're not changing the ABI to make this a tail call, the memory
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -124,7 +124,7 @@
     } else
       Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
 
-    StackOffset = State.getNextStackOffset();
+    StackSize = State.getStackSize();
     return Res;
   }
 };
@@ -706,7 +706,7 @@
   }
 
   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
-  uint64_t StackOffset = Assigner.StackOffset;
+  uint64_t StackSize = Assigner.StackSize;
   if (F.isVarArg()) {
     if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) ||
         IsWin64) {
       // The AAPCS variadic function ABI is identical to the non-variadic
@@ -720,22 +720,21 @@
     }
 
     // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
-    StackOffset =
-        alignTo(Assigner.StackOffset, Subtarget.isTargetILP32() ? 4 : 8);
+    StackSize = alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);
 
     auto &MFI = MIRBuilder.getMF().getFrameInfo();
-    FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
+    FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackSize, true));
   }
 
   if (doesCalleeRestoreStack(F.getCallingConv(),
                              MF.getTarget().Options.GuaranteedTailCallOpt)) {
     // We have a non-standard ABI, so why not make full use of the stack that
     // we're going to pop? It must be aligned to 16 B in any case.
-    StackOffset = alignTo(StackOffset, 16);
+    StackSize = alignTo(StackSize, 16);
 
     // If we're expected to restore the stack (e.g. fastcc), then we'll be
     // adding a multiple of 16.
-    FuncInfo->setArgumentStackToRestore(StackOffset);
+    FuncInfo->setArgumentStackToRestore(StackSize);
 
     // Our own callers will guarantee that the space is free by giving an
     // aligned value to CALLSEQ_START.
@@ -745,7 +744,7 @@
   // will fit on the caller's stack. So, whenever we lower formal arguments,
   // we should keep track of this information, since we might lower a tail call
   // in this function later.
-  FuncInfo->setBytesInStackArgArea(StackOffset);
+  FuncInfo->setBytesInStackArgArea(StackSize);
 
   if (Subtarget.hasCustomCallingConv())
     Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
@@ -861,7 +860,7 @@
   // Make sure that they can fit on the caller's stack.
   const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
-  if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
+  if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
     LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
     return false;
   }
@@ -1110,7 +1109,7 @@
   // The callee will pop the argument stack as a tail call. Thus, we must
   // keep it 16-byte aligned.
-  NumBytes = alignTo(OutInfo.getNextStackOffset(), 16);
+  NumBytes = alignTo(OutInfo.getStackSize(), 16);
 
   // FPDiff will be negative if this tail call requires more space than we
   // would automatically have in our incoming argument space. Positive if we
@@ -1315,12 +1314,12 @@
   uint64_t CalleePopBytes =
       doesCalleeRestoreStack(Info.CallConv,
                              MF.getTarget().Options.GuaranteedTailCallOpt)
-          ? alignTo(Assigner.StackOffset, 16)
+          ? alignTo(Assigner.StackSize, 16)
           : 0;
 
-  CallSeqStart.addImm(Assigner.StackOffset).addImm(0);
+  CallSeqStart.addImm(Assigner.StackSize).addImm(0);
   MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
-      .addImm(Assigner.StackOffset)
+      .addImm(Assigner.StackSize)
       .addImm(CalleePopBytes);
 
   // If Callee is a reg, since it is used by a target specific
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -726,7 +726,7 @@
   if (!handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, B))
     return false;
 
-  uint64_t StackOffset = Assigner.StackOffset;
+  uint64_t StackSize = Assigner.StackSize;
 
   // Start adding system SGPRs.
   if (IsEntryFunc) {
@@ -741,7 +741,7 @@
   // the caller's stack. So, whenever we lower formal arguments, we should keep
   // track of this information, since we might lower a tail call in this
   // function later.
-  Info->setBytesInStackArgArea(StackOffset);
+  Info->setBytesInStackArgArea(StackSize);
 
   // Move back to the end of the basic block.
   B.setMBB(MBB);
@@ -1059,7 +1059,7 @@
   // Make sure that they can fit on the caller's stack.
   const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
-  if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
+  if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
     LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
     return false;
   }
@@ -1230,7 +1230,7 @@
   // The callee will pop the argument stack as a tail call. Thus, we must
   // keep it 16-byte aligned.
-  NumBytes = alignTo(OutInfo.getNextStackOffset(), ST.getStackAlignment());
+  NumBytes = alignTo(OutInfo.getStackSize(), ST.getStackAlignment());
 
   // FPDiff will be negative if this tail call requires more space than we
   // would automatically have in our incoming argument space. Positive if we
@@ -1396,7 +1396,7 @@
   handleImplicitCallArguments(MIRBuilder, MIB, ST, *MFI, ImplicitArgRegs);
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = CCInfo.getNextStackOffset();
+  unsigned NumBytes = CCInfo.getStackSize();
 
   // If Callee is a reg, since it is used by a target specific
   // instruction, it must have a register class matching the
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -2638,7 +2638,7 @@
       DAG.getPass()->getAnalysis<AMDGPUArgumentUsageInfo>();
   ArgUsageInfo.setFuncArgInfo(Fn, Info->getArgInfo());
 
-  unsigned StackArgSize = CCInfo.getNextStackOffset();
+  unsigned StackArgSize = CCInfo.getStackSize();
   Info->setBytesInStackArgArea(StackArgSize);
 
   return Chains.empty() ? Chain :
@@ -3094,7 +3094,7 @@
   // If the stack arguments for this call do not fit into our own save area then
   // the call cannot be made tail.
   // TODO: Is this really necessary?
-  if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
+  if (CCInfo.getStackSize() > FuncInfo->getBytesInStackArgArea())
     return false;
 
   const MachineRegisterInfo &MRI = MF.getRegInfo();
@@ -3201,7 +3201,7 @@
   CCInfo.AnalyzeCallOperands(Outs, AssignFn);
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = CCInfo.getNextStackOffset();
+  unsigned NumBytes = CCInfo.getStackSize();
 
   if (IsSibCall) {
     // Since we're not changing the ABI to make this a tail call, the memory
diff --git a/llvm/lib/Target/ARC/ARCISelLowering.cpp b/llvm/lib/Target/ARC/ARCISelLowering.cpp
--- a/llvm/lib/Target/ARC/ARCISelLowering.cpp
+++ b/llvm/lib/Target/ARC/ARCISelLowering.cpp
@@ -283,11 +283,11 @@
   // Analyze return values to determine the number of bytes of stack required.
   CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                     *DAG.getContext());
-  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
+  RetCCInfo.AllocateStack(CCInfo.getStackSize(), Align(4));
   RetCCInfo.AnalyzeCallResult(Ins, RetCC_ARC);
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = RetCCInfo.getNextStackOffset();
+  unsigned NumBytes = RetCCInfo.getStackSize();
 
   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
 
@@ -498,7 +498,7 @@
   unsigned StackSlotSize = 4;
   if (!IsVarArg)
-    AFI->setReturnStackOffset(CCInfo.getNextStackOffset());
+    AFI->setReturnStackOffset(CCInfo.getStackSize());
 
   // All getCopyFromReg ops must precede any getMemcpys to prevent the
   // scheduler clobbering a register before it has been copied.
@@ -565,7 +565,7 @@
       // There are (std::size(ArgRegs) - FirstVAReg) registers which
       // need to be saved.
       int VarFI = MFI.CreateFixedObject((std::size(ArgRegs) - FirstVAReg) * 4,
-                                        CCInfo.getNextStackOffset(), true);
+                                        CCInfo.getStackSize(), true);
       AFI->setVarArgsFrameIndex(VarFI);
       SDValue FIN = DAG.getFrameIndex(VarFI, MVT::i32);
       for (unsigned i = FirstVAReg; i < std::size(ArgRegs); i++) {
@@ -633,7 +633,7 @@
   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
   if (!CCInfo.CheckReturn(Outs, RetCC_ARC))
     return false;
-  if (CCInfo.getNextStackOffset() != 0 && IsVarArg)
+  if (CCInfo.getStackSize() != 0 && IsVarArg)
     return false;
   return true;
 }
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -528,12 +528,10 @@
   // We now know the size of the stack - update the ADJCALLSTACKDOWN
   // accordingly.
-  CallSeqStart.addImm(ArgAssigner.StackOffset)
-      .addImm(0)
-      .add(predOps(ARMCC::AL));
+  CallSeqStart.addImm(ArgAssigner.StackSize).addImm(0).add(predOps(ARMCC::AL));
 
   MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
-      .addImm(ArgAssigner.StackOffset)
+      .addImm(ArgAssigner.StackSize)
       .addImm(-1ULL)
       .add(predOps(ARMCC::AL));
 
diff --git a/llvm/lib/Target/ARM/ARMCallingConv.cpp b/llvm/lib/Target/ARM/ARMCallingConv.cpp
--- a/llvm/lib/Target/ARM/ARMCallingConv.cpp
+++ b/llvm/lib/Target/ARM/ARMCallingConv.cpp
@@ -241,7 +241,7 @@
   // Register allocation failed, we'll be needing the stack
   unsigned Size = LocVT.getSizeInBits() / 8;
-  if (LocVT == MVT::i32 && State.getNextStackOffset() == 0) {
+  if (LocVT == MVT::i32 && State.getStackSize() == 0) {
     // If nothing else has used the stack until this point, a non-HFA aggregate
     // can be split between regs and stack.
     unsigned RegIdx = State.getFirstUnallocated(RegList);
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -1928,7 +1928,7 @@
   // At the point, we are able to handle the call's arguments in fast isel.
 
   // Get a count of how many bytes are to be pushed on the stack.
-  NumBytes = CCInfo.getNextStackOffset();
+  NumBytes = CCInfo.getStackSize();
 
   // Issue CALLSEQ_START
   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2417,7 +2417,7 @@
   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = CCInfo.getNextStackOffset();
+  unsigned NumBytes = CCInfo.getStackSize();
 
   // SPDiff is the byte offset of the call's argument area from the callee's.
   // Stores to callee stack arguments will be placed in FixedStackSlots offset
@@ -2912,7 +2912,7 @@
   // all remained GPR regs. In that case we can't split parameter, we must
   // send it to stack. We also must set NCRN to R4, so waste all
   // remained registers.
-  const unsigned NSAAOffset = State->getNextStackOffset();
+  const unsigned NSAAOffset = State->getStackSize();
   if (NSAAOffset != 0 && Size > Excess) {
     while (State->AllocateReg(GPRArgRegs))
       ;
@@ -3078,7 +3078,7 @@
     SmallVector<CCValAssign, 16> ArgLocs;
     CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
     CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
-    if (CCInfo.getNextStackOffset()) {
+    if (CCInfo.getStackSize()) {
       // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
@@ -4419,10 +4419,9 @@
     // the result of va_next.
     // If there is no regs to be stored, just point address after last
     // argument passed via stack.
-    int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
-                                    CCInfo.getInRegsParamsCount(),
-                                    CCInfo.getNextStackOffset(),
-                                    std::max(4U, TotalArgRegsSaveSize));
+    int FrameIndex = StoreByValRegs(
+        CCInfo, DAG, dl, Chain, nullptr, CCInfo.getInRegsParamsCount(),
+        CCInfo.getStackSize(), std::max(4U, TotalArgRegsSaveSize));
     AFI->setVarArgsFrameIndex(FrameIndex);
   }
 
@@ -4657,7 +4656,7 @@
   // varargs
   if (isVarArg && MFI.hasVAStart()) {
-    VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset(),
+    VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getStackSize(),
                          TotalArgRegsSaveSize);
     if (AFI->isCmseNSEntryFunction()) {
       DiagnosticInfoUnsupported Diag(
@@ -4667,7 +4666,7 @@
     }
   }
 
-  unsigned StackArgSize = CCInfo.getNextStackOffset();
+  unsigned StackArgSize = CCInfo.getStackSize();
   bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
   if (canGuaranteeTCO(CallConv, TailCallOpt)) {
     // The only way to guarantee a tail call is if the callee restores its
@@ -4679,7 +4678,7 @@
   }
 
   AFI->setArgumentStackSize(StackArgSize);
 
-  if (CCInfo.getNextStackOffset() > 0 && AFI->isCmseNSEntryFunction()) {
+  if (CCInfo.getStackSize() > 0 && AFI->isCmseNSEntryFunction()) {
     DiagnosticInfoUnsupported Diag(
         DAG.getMachineFunction().getFunction(),
         "secure entry function requires arguments on stack", dl.getDebugLoc());
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -1441,7 +1441,7 @@
   // If the function takes variable number of arguments, make a frame index for
   // the start of the first vararg value... for expansion of llvm.va_start.
   if (isVarArg) {
-    unsigned StackSize = CCInfo.getNextStackOffset();
+    unsigned StackSize = CCInfo.getStackSize();
     AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
 
     AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
@@ -1502,7 +1502,7 @@
   }
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = CCInfo.getNextStackOffset();
+  unsigned NumBytes = CCInfo.getStackSize();
 
   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
 
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp
--- a/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -396,7 +396,7 @@
   CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);
 
-  unsigned NumBytes = CCInfo.getNextStackOffset();
+  unsigned NumBytes = CCInfo.getStackSize();
 
   if (Outs.size() > MaxArgs)
     fail(CLI.DL, DAG, "too many args to ", Callee);
diff --git a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
--- a/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
+++ b/llvm/lib/Target/CSKY/CSKYISelLowering.cpp
@@ -378,7 +378,7 @@
   // If all registers are allocated, then all varargs must be passed on the
   // stack and we don't need to save any argregs.
   if (ArgRegs.size() == Idx) {
-    VaArgOffset = CCInfo.getNextStackOffset();
+    VaArgOffset = CCInfo.getStackSize();
     VarArgsSaveSize = 0;
   } else {
     VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
@@ -531,7 +531,7 @@
          "site marked musttail");
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = ArgCCInfo.getNextStackOffset();
+  unsigned NumBytes = ArgCCInfo.getStackSize();
 
   // Create local copies for byval args
   SmallVector<SDValue> ByValArgs;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -453,7 +453,7 @@
                         "Not eligible for Tail Call\n"));
   }
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = CCInfo.getNextStackOffset();
+  unsigned NumBytes = CCInfo.getStackSize();
   SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
   SmallVector<SDValue, 8> MemOpChains;
 
@@ -907,7 +907,7 @@
   if (RegSaveAreaSizePlusPadding > 0) {
     // The offset to saved register area should be 8 byte aligned.
-    int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
+    int RegAreaStart = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
     if (!(RegAreaStart % 8))
       RegAreaStart = (RegAreaStart + 7) & -8;
 
@@ -922,7 +922,7 @@
   } else {
     // This will point to the next argument passed via stack, when
     // there is no saved register area.
-    int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
+    int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
     int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
     HMFI.setRegSavedAreaStartFrameIndex(FI);
     HMFI.setVarArgsFrameIndex(FI);
@@ -932,7 +932,7 @@
 
   if (IsVarArg && !Subtarget.isEnvironmentMusl()) {
     // This will point to the next argument passed via stack.
-    int Offset = HEXAGON_LRFP_SIZE + CCInfo.getNextStackOffset();
+    int Offset = HEXAGON_LRFP_SIZE + CCInfo.getStackSize();
     int FI = MFI.CreateFixedObject(Hexagon_PointerSize, Offset, true);
     HMFI.setVarArgsFrameIndex(FI);
   }
diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
--- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -519,7 +519,7 @@
   if (IsVarArg) {
     // Record the frame index of the first variable argument
     // which is a value necessary to VASTART.
-    int FI = MFI.CreateFixedObject(4, CCInfo.getNextStackOffset(), true);
+    int FI = MFI.CreateFixedObject(4, CCInfo.getStackSize(), true);
     LanaiMFI->setVarArgsFrameIndex(FI);
   }
 
@@ -627,7 +627,7 @@
   }
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = CCInfo.getNextStackOffset();
+  unsigned NumBytes = CCInfo.getStackSize();
 
   // Create local copies for byval args.
   SmallVector<SDValue, 8> ByValArgs;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2298,7 +2298,7 @@
   // If all registers are allocated, then all varargs must be passed on the
   // stack and we don't need to save any argregs.
   if (ArgRegs.size() == Idx) {
-    VaArgOffset = CCInfo.getNextStackOffset();
+    VaArgOffset = CCInfo.getStackSize();
    VarArgsSaveSize = 0;
   } else {
     VarArgsSaveSize = GRLenInBytes * (ArgRegs.size() - Idx);
@@ -2396,7 +2396,7 @@
   auto CallerCC = Caller.getCallingConv();
 
   // Do not tail call opt if the stack is used to pass parameters.
-  if (CCInfo.getNextStackOffset() != 0)
+  if (CCInfo.getStackSize() != 0)
     return false;
 
   // Do not tail call opt if any parameters need to be passed indirectly.
@@ -2472,7 +2472,7 @@
          "site marked musttail");
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = ArgCCInfo.getNextStackOffset();
+  unsigned NumBytes = ArgCCInfo.getStackSize();
 
   // Create local copies for byval args.
   SmallVector<SDValue> ByValArgs;
diff --git a/llvm/lib/Target/M68k/GISel/M68kCallLowering.cpp b/llvm/lib/Target/M68k/GISel/M68kCallLowering.cpp
--- a/llvm/lib/Target/M68k/GISel/M68kCallLowering.cpp
+++ b/llvm/lib/Target/M68k/GISel/M68kCallLowering.cpp
@@ -221,10 +221,10 @@
     return false;
   }
 
-  CallSeqStart.addImm(Assigner.StackOffset).addImm(0);
+  CallSeqStart.addImm(Assigner.StackSize).addImm(0);
 
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
-  MIRBuilder.buildInstr(AdjStackUp).addImm(Assigner.StackOffset).addImm(0);
+  MIRBuilder.buildInstr(AdjStackUp).addImm(Assigner.StackSize).addImm(0);
 
   return true;
 }
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -1005,7 +1005,7 @@
     }
   }
 
-  unsigned StackSize = CCInfo.getNextStackOffset();
+  unsigned StackSize = CCInfo.getStackSize();
 
   // Align stack specially for tail calls.
   if (shouldGuaranteeTCO(CCID, MF.getTarget().Options.GuaranteedTailCallOpt))
     StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
@@ -1294,9 +1294,9 @@
     CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);
     CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
-    StackArgsSize = CCInfo.getNextStackOffset();
+    StackArgsSize = CCInfo.getStackSize();
 
-    if (CCInfo.getNextStackOffset()) {
+    if (StackArgsSize) {
       // Check if the arguments are already laid out in the right way as
       // the caller's fixed stack objects.
       MachineFrameInfo &MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -631,7 +631,7 @@
   // Create frame index for the start of the first vararg value
   if (isVarArg) {
-    unsigned Offset = CCInfo.getNextStackOffset();
+    unsigned Offset = CCInfo.getStackSize();
     FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, Offset, true));
   }
 
@@ -814,7 +814,7 @@
   AnalyzeArguments(CCInfo, ArgLocs, Outs);
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = CCInfo.getNextStackOffset();
+  unsigned NumBytes = CCInfo.getStackSize();
 
   MVT PtrVT = getFrameIndexTy(DAG.getDataLayout());
 
   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
 
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -412,7 +412,7 @@
   int VaArgOffset;
   unsigned RegSize = 4;
   if (ArgRegs.size() == Idx)
-    VaArgOffset = alignTo(CCInfo.getNextStackOffset(), RegSize);
+    VaArgOffset = alignTo(CCInfo.getStackSize(), RegSize);
   else {
     VaArgOffset =
         (int)ABI.GetCalleeAllocdArgSizeInBytes(CCInfo.getCallingConv()) -
@@ -524,14 +524,14 @@
   if (!handleAssignments(ArgHandler, ArgInfos, CCInfo, ArgLocs, MIRBuilder))
     return false;
 
-  unsigned NextStackOffset = CCInfo.getNextStackOffset();
+  unsigned StackSize = CCInfo.getStackSize();
   unsigned StackAlignment = F.getParent()->getOverrideStackAlignment();
   if (!StackAlignment) {
     const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
     StackAlignment = TFL->getStackAlignment();
   }
-  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
-  CallSeqStart.addImm(NextStackOffset).addImm(0);
+  StackSize = alignTo(StackSize, StackAlignment);
+  CallSeqStart.addImm(StackSize).addImm(0);
 
   if (IsCalleeGlobalPIC) {
     MIRBuilder.buildCopy(
@@ -570,7 +570,7 @@
     return false;
   }
 
-  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);
+  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(StackSize).addImm(0);
 
   return true;
 }
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -1135,7 +1135,7 @@
   CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
   CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
   // Get a count of how many bytes are to be pushed on the stack.
-  NumBytes = CCInfo.getNextStackOffset();
+  NumBytes = CCInfo.getStackSize();
   // This is the minimum argument area used for A0-A3.
   if (NumBytes < 16)
     NumBytes = 16;
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -3214,7 +3214,7 @@
                                          ES ? ES->getSymbol() : nullptr);
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NextStackOffset = CCInfo.getNextStackOffset();
+  unsigned StackSize = CCInfo.getStackSize();
 
   // Call site info for function parameters tracking.
   MachineFunction::CallSiteInfo CSInfo;
@@ -3224,8 +3224,8 @@
   bool InternalLinkage = false;
   if (IsTailCall) {
     IsTailCall = isEligibleForTailCallOptimization(
-        CCInfo, NextStackOffset, *MF.getInfo<MipsFunctionInfo>());
-    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+        CCInfo, StackSize, *MF.getInfo<MipsFunctionInfo>());
+    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
       InternalLinkage = G->getGlobal()->hasInternalLinkage();
       IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
                      G->getGlobal()->hasPrivateLinkage() ||
@@ -3244,10 +3244,10 @@
   // ByValChain is the output chain of the last Memcpy node created for copying
   // byval arguments to the stack.
   unsigned StackAlignment = TFL->getStackAlignment();
-  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
+  StackSize = alignTo(StackSize, StackAlignment);
 
   if (!(IsTailCall || MemcpyInByVal))
-    Chain = DAG.getCALLSEQ_START(Chain, NextStackOffset, 0, DL);
+    Chain = DAG.getCALLSEQ_START(Chain, StackSize, 0, DL);
 
   SDValue StackPtr =
       DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,
                          getPointerTy(DAG.getDataLayout()));
@@ -3475,7 +3475,7 @@
   // Create the CALLSEQ_END node in the case of where it is not a call to
   // memcpy.
   if (!(MemcpyInByVal)) {
-    Chain = DAG.getCALLSEQ_END(Chain, NextStackOffset, 0, InGlue, DL);
+    Chain = DAG.getCALLSEQ_END(Chain, StackSize, 0, InGlue, DL);
     InGlue = Chain.getValue(1);
   }
 
@@ -3640,7 +3640,7 @@
          "Functions with the interrupt attribute cannot have arguments!");
 
   CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
-  MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(),
+  MipsFI->setFormalArgInfo(CCInfo.getStackSize(),
                            CCInfo.getInRegsParamsCount() > 0);
 
   unsigned CurArgIdx = 0;
@@ -4502,7 +4502,7 @@
   int VaArgOffset;
 
   if (ArgRegs.size() == Idx)
-    VaArgOffset = alignTo(State.getNextStackOffset(), RegSizeInBytes);
+    VaArgOffset = alignTo(State.getStackSize(), RegSizeInBytes);
   else {
     VaArgOffset =
         (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -1404,7 +1404,7 @@
   }
 
   // Get a count of how many bytes are to be pushed onto the stack.
-  NumBytes = CCInfo.getNextStackOffset();
+  NumBytes = CCInfo.getStackSize();
 
   // The prolog code of the callee may store up to 8 GPR argument registers to
   // the stack, allowing va_start to index over them in memory if its varargs.
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -4181,12 +4181,12 @@
                  ByValArgLocs, *DAG.getContext());
 
   // Reserve stack space for the allocations in CCInfo.
-  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
+  CCByValInfo.AllocateStack(CCInfo.getStackSize(), PtrAlign);
 
   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
 
   // Area that is at least reserved in the caller of this function.
-  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
+  unsigned MinReservedArea = CCByValInfo.getStackSize();
   MinReservedArea = std::max(MinReservedArea, LinkageSize);
 
   // Set the size that is at least reserved in caller of this function. Tail
@@ -4224,9 +4224,8 @@
   int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
               NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
 
-  FuncInfo->setVarArgsStackOffset(
-      MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
-                            CCInfo.getNextStackOffset(), true));
+  FuncInfo->setVarArgsStackOffset(MFI.CreateFixedObject(
+      PtrVT.getSizeInBits() / 8, CCInfo.getStackSize(), true));
 
   FuncInfo->setVarArgsFrameIndex(
       MFI.CreateStackObject(Depth, Align(8), false));
@@ -5852,14 +5851,14 @@
   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
 
   // Reserve stack space for the allocations in CCInfo.
-  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
+  CCByValInfo.AllocateStack(CCInfo.getStackSize(), PtrAlign);
 
   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
 
   // Size of the linkage area, parameter list area and the part of the local
   // space variable where copies of aggregates which are passed by value are
   // stored.
-  unsigned NumBytes = CCByValInfo.getNextStackOffset();
+  unsigned NumBytes = CCByValInfo.getStackSize();
 
   // Calculate by how many bytes the stack has to be adjusted in case of tail
   // call optimization.
@@ -6680,8 +6679,7 @@
   // but needs a MemLoc for a stack slot for the formal arguments side.
   if (ByValSize == 0) {
     State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
-                                     State.getNextStackOffset(), RegVT,
-                                     LocInfo));
+                                     State.getStackSize(), RegVT, LocInfo));
     return false;
   }
 
@@ -7225,7 +7223,7 @@
   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
   // Area that is at least reserved in the caller of this function.
   unsigned CallerReservedArea =
-      std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
+      std::max(CCInfo.getStackSize(), LinkageSize + MinParameterSaveArea);
 
   // Set the size that is at least reserved in caller of this function. Tail
   // call optimized function's reserved stack space needs to be aligned so
@@ -7237,7 +7235,7 @@
 
   if (isVarArg) {
     FuncInfo->setVarArgsFrameIndex(
-        MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
+        MFI.CreateFixedObject(PtrByteSize, CCInfo.getStackSize(), true));
     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
 
     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
@@ -7251,7 +7249,7 @@
     // VarArgsFrameIndex on the stack so that they may be loaded by
     // dereferencing the result of va_next.
     for (unsigned GPRIndex =
-             (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
+             (CCInfo.getStackSize() - LinkageSize) / PtrByteSize;
          GPRIndex < NumGPArgRegs; ++GPRIndex) {
 
       const Register VReg =
@@ -7317,8 +7315,8 @@
   // conservatively assume that it is needed. As such, make sure we have at
   // least enough stack space for the caller to store the 8 GPRs.
   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
-  const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
-                                     CCInfo.getNextStackOffset());
+  const unsigned NumBytes =
+      std::max(LinkageSize + MinParameterSaveAreaSize, CCInfo.getStackSize());
 
   // Adjust the stack pointer for the new arguments...
   // These operations are automatically eliminated by the prolog/epilog pass.
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14151,7 +14151,7 @@
   // If all registers are allocated, then all varargs must be passed on the
   // stack and we don't need to save any argregs.
   if (ArgRegs.size() == Idx) {
-    VaArgOffset = CCInfo.getNextStackOffset();
+    VaArgOffset = CCInfo.getStackSize();
     VarArgsSaveSize = 0;
   } else {
     VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
@@ -14221,7 +14221,7 @@
     return false;
 
   // Do not tail call opt if the stack is used to pass parameters.
-  if (CCInfo.getNextStackOffset() != 0)
+  if (CCInfo.getStackSize() != 0)
     return false;
 
   // Do not tail call opt if any parameters need to be passed indirectly.
@@ -14308,7 +14308,7 @@
          "site marked musttail");
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = ArgCCInfo.getNextStackOffset();
+  unsigned NumBytes = ArgCCInfo.getStackSize();
 
   // Create local copies for byval args
   SmallVector<SDValue, 8> ByValArgs;
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -584,7 +584,7 @@
   };
   unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
   const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
-  unsigned ArgOffset = CCInfo.getNextStackOffset();
+  unsigned ArgOffset = CCInfo.getStackSize();
   if (NumAllocated == 6)
     ArgOffset += StackOffset;
   else {
@@ -703,7 +703,7 @@
   //
   // The va_start intrinsic needs to know the offset to the first variable
   // argument.
-  unsigned ArgOffset = CCInfo.getNextStackOffset();
+  unsigned ArgOffset = CCInfo.getStackSize();
   SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
   // Skip the 128 bytes of register save area.
   FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
@@ -773,8 +773,8 @@
   // Do not tail call opt if the stack is used to pass parameters.
   // 64-bit targets have a slightly higher limit since the ABI requires
   // to allocate some space even when all the parameters fit inside registers.
-  unsigned StackOffsetLimit = Subtarget->is64Bit() ? 48 : 0;
-  if (CCInfo.getNextStackOffset() > StackOffsetLimit)
+  unsigned StackSizeLimit = Subtarget->is64Bit() ? 48 : 0;
+  if (CCInfo.getStackSize() > StackSizeLimit)
     return false;
 
   // Do not tail call opt if either the callee or caller returns
@@ -816,7 +816,7 @@
                                      CCInfo, CLI, DAG.getMachineFunction());
 
   // Get the size of the outgoing arguments stack space requirement.
-  unsigned ArgsSize = CCInfo.getNextStackOffset();
+  unsigned ArgsSize = CCInfo.getStackSize();
 
   // Keep stack frames 8-byte aligned.
   ArgsSize = (ArgsSize+7) & ~7;
@@ -1204,7 +1204,7 @@
   // Called functions expect 6 argument words to exist in the stack frame, used
   // or not.
   unsigned StackReserved = 6 * 8u;
-  unsigned ArgsSize = std::max(StackReserved, CCInfo.getNextStackOffset());
+  unsigned ArgsSize = std::max(StackReserved, CCInfo.getStackSize());
 
   // Keep stack frames 16-byte aligned.
   ArgsSize = alignTo(ArgsSize, 16);
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1602,8 +1602,9 @@
     // Likewise the address (in the form of a frame index) of where the
     // first stack vararg would be.  The 1-byte size here is arbitrary.
-    int64_t StackSize = CCInfo.getNextStackOffset();
-    FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
+    int64_t VarArgsOffset = CCInfo.getStackSize();
+    FuncInfo->setVarArgsFrameIndex(
+        MFI.CreateFixedObject(1, VarArgsOffset, true));
 
     // ...and a similar frame index for the caller-allocated save area
     // that will be used to store the incoming registers.
@@ -1699,7 +1700,7 @@
     IsTailCall = false;
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = ArgCCInfo.getNextStackOffset();
+  unsigned NumBytes = ArgCCInfo.getStackSize();
 
   if (Subtarget.isTargetXPLINK64())
     // Although the XPLINK specifications for AMODE64 state that minimum size
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -615,7 +615,7 @@
   CCInfo2.AnalyzeCallOperands(CLI.Outs, getParamCC(CLI.CallConv, true));
 
   // Get the size of the outgoing arguments stack space requirement.
-  unsigned ArgsSize = CCInfo.getNextStackOffset();
+  unsigned ArgsSize = CCInfo.getStackSize();
 
   // Keep stack frames 16-byte aligned.
   ArgsSize = alignTo(ArgsSize, 16);
diff --git a/llvm/lib/Target/X86/X86CallLowering.cpp b/llvm/lib/Target/X86/X86CallLowering.cpp
--- a/llvm/lib/Target/X86/X86CallLowering.cpp
+++ b/llvm/lib/Target/X86/X86CallLowering.cpp
@@ -70,7 +70,7 @@
                  const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                  CCState &State) override {
     bool Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
-    StackSize = State.getNextStackOffset();
+    StackSize = State.getStackSize();
 
     static const MCPhysReg XMMArgRegs[] = {X86::XMM0, X86::XMM1, X86::XMM2,
                                            X86::XMM3, X86::XMM4, X86::XMM5,
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4314,7 +4314,7 @@
     }
   }
 
-  unsigned StackSize = CCInfo.getNextStackOffset();
+  unsigned StackSize = CCInfo.getStackSize();
 
   // Align stack specially for tail calls.
   if (shouldGuaranteeTCO(CallConv,
                          MF.getTarget().Options.GuaranteedTailCallOpt))
@@ -5292,9 +5292,9 @@
     CCInfo.AllocateStack(32, Align(8));
 
     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
-    StackArgsSize = CCInfo.getNextStackOffset();
+    StackArgsSize = CCInfo.getStackSize();
 
-    if (CCInfo.getNextStackOffset()) {
+    if (StackArgsSize) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1122,11 +1122,11 @@
   // Analyze return values to determine the number of bytes of stack required.
   CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                     *DAG.getContext());
-  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
+  RetCCInfo.AllocateStack(CCInfo.getStackSize(), Align(4));
   RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
 
   // Get a count of how many bytes are to be pushed on the stack.
-  unsigned NumBytes = RetCCInfo.getNextStackOffset();
+  unsigned NumBytes = RetCCInfo.getStackSize();
 
   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
 
@@ -1272,7 +1272,7 @@
   unsigned LRSaveSize = StackSlotSize;
 
   if (!isVarArg)
-    XFI->setReturnStackOffset(CCInfo.getNextStackOffset() + LRSaveSize);
+    XFI->setReturnStackOffset(CCInfo.getStackSize() + LRSaveSize);
 
   // All getCopyFromReg ops must precede any getMemcpys to prevent the
   // scheduler clobbering a register before it has been copied.
@@ -1366,8 +1366,7 @@
     } else {
       // This will point to the next argument passed via stack.
       XFI->setVarArgsFrameIndex(
-          MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
-                                true));
+          MFI.CreateFixedObject(4, LRSaveSize + CCInfo.getStackSize(), true));
     }
   }
 
@@ -1419,7 +1418,7 @@
   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
   if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
     return false;
-  if (CCInfo.getNextStackOffset() != 0 && isVarArg)
+  if (CCInfo.getStackSize() != 0 && isVarArg)
     return false;
   return true;
 }
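
Context for reviewers, not part of the patch: every call site above follows the same pattern. The target runs its calling-convention analysis over the outgoing (or incoming) values, then asks the CCState how many bytes of stack the arguments consumed. Below is a minimal C++ sketch of that pattern against the renamed API; the helper name and its parameter list are hypothetical, while CCState, AnalyzeCallOperands, and getStackSize() are the interfaces this patch actually touches.

// Sketch: compute the outgoing-argument stack bytes for a call, as the
// targets above do before emitting CALLSEQ_START. Hypothetical helper;
// each backend supplies its own Outs and assignment function.
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/TargetCallingConv.h"

using namespace llvm;

static unsigned computeOutgoingStackBytes(
    CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
    const SmallVectorImpl<ISD::OutputArg> &Outs, CCAssignFn *Fn,
    LLVMContext &Ctx) {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, IsVarArg, MF, ArgLocs, Ctx);
  // Assign each outgoing value to a register or a stack slot; stack slots
  // advance CCState's internal StackSize via AllocateStack().
  CCInfo.AnalyzeCallOperands(Outs, Fn);
  // Formerly CCInfo.getNextStackOffset(): the value is unchanged, but the
  // name now says what it is, the size of the allocated stack area rather
  // than an offset into it.
  return CCInfo.getStackSize();
}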