diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1764,7 +1764,7 @@
   unsigned RegIdx = CCInfo.getFirstUnallocated(ArgVGPRs);
   if (RegIdx == ArgVGPRs.size()) {
     // Spill to stack required.
-    int64_t Offset = CCInfo.AllocateStack(4, 4);
+    int64_t Offset = CCInfo.AllocateStack(4, Align(4));
 
     return ArgDescriptor::createStack(Offset, Mask);
   }
@@ -2596,7 +2596,8 @@
     if (!CCInfo.AllocateReg(OutgoingArg->getRegister()))
       report_fatal_error("failed to allocate implicit input argument");
   } else {
-    unsigned SpecialArgOffset = CCInfo.AllocateStack(ArgVT.getStoreSize(), 4);
+    unsigned SpecialArgOffset =
+        CCInfo.AllocateStack(ArgVT.getStoreSize(), Align(4));
     SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
                                             SpecialArgOffset);
     MemOpChains.push_back(ArgStore);
@@ -2663,7 +2664,7 @@
     RegsToPass.emplace_back(OutgoingArg->getRegister(), InputReg);
     CCInfo.AllocateReg(OutgoingArg->getRegister());
   } else {
-    unsigned SpecialArgOffset = CCInfo.AllocateStack(4, 4);
+    unsigned SpecialArgOffset = CCInfo.AllocateStack(4, Align(4));
     SDValue ArgStore = storeStackInputValue(DAG, DL, Chain, InputReg,
                                             SpecialArgOffset);
     MemOpChains.push_back(ArgStore);
diff --git a/llvm/lib/Target/ARC/ARCISelLowering.cpp b/llvm/lib/Target/ARC/ARCISelLowering.cpp
--- a/llvm/lib/Target/ARC/ARCISelLowering.cpp
+++ b/llvm/lib/Target/ARC/ARCISelLowering.cpp
@@ -245,7 +245,7 @@
   // Analyze return values to determine the number of bytes of stack required.
   CCState RetCCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                     *DAG.getContext());
-  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
+  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
   RetCCInfo.AnalyzeCallResult(Ins, RetCC_ARC);
 
   // Get a count of how many bytes are to be pushed on the stack.
@@ -622,7 +622,7 @@
 
   // Analyze return values.
   if (!IsVarArg)
-    CCInfo.AllocateStack(AFI->getReturnStackOffset(), 4);
+    CCInfo.AllocateStack(AFI->getReturnStackOffset(), Align(4));
 
   CCInfo.AnalyzeReturn(Outs, RetCC_ARC);
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -993,8 +993,7 @@
   for (unsigned j = 0; j != Size; ++j) {
     unsigned Offset = CCInfo.AllocateStack(
         TD->getTypeAllocSize(EVT(LocVT).getTypeForEVT(CCInfo.getContext())),
-        TD->getABITypeAlignment(
-            EVT(LocVT).getTypeForEVT(CCInfo.getContext())));
+        TD->getABITypeAlign(EVT(LocVT).getTypeForEVT(CCInfo.getContext())));
     CCInfo.addLoc(CCValAssign::getMem(ValNo++, LocVT, Offset, LocVT,
                                       CCValAssign::Full));
   }
diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
--- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
+++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp
@@ -388,7 +388,7 @@
   }
 
   // VarArgs get passed on stack
-  unsigned Offset = State.AllocateStack(4, 4);
+  unsigned Offset = State.AllocateStack(4, Align(4));
   State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
   return false;
 }
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -451,7 +451,7 @@
       static_cast<const MipsTargetMachine &>(MF.getTarget());
   const MipsABIInfo &ABI = TM.getABI();
   CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
-                       1);
+                       Align(1));
   CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());
   setLocInfo(ArgLocs, Ins);
 
@@ -572,7 +572,8 @@
   MipsCCState CCInfo(F.getCallingConv(), IsCalleeVarArg, MF, ArgLocs,
                      F.getContext());
 
-  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv), 1);
+  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv),
+                       Align(1));
   const char *Call =
       Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
   CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2963,7 +2963,8 @@
     llvm_unreachable("Cannot handle this ValVT.");
 
   if (!Reg) {
-    unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
+    unsigned Offset =
+        State.AllocateStack(ValVT.getStoreSize(), Align(OrigAlign));
     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
   } else
     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -3209,7 +3210,7 @@
   // caller side but removing it breaks the frame size calculation.
   unsigned ReservedArgArea =
       MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
-  CCInfo.AllocateStack(ReservedArgArea, 1);
+  CCInfo.AllocateStack(ReservedArgArea, Align(1));
   CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
                              ES ? ES->getSymbol() : nullptr);
@@ -3631,7 +3632,7 @@
   SmallVector<CCValAssign, 16> ArgLocs;
   MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                      *DAG.getContext());
-  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
+  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
   const Function &Func = DAG.getMachineFunction().getFunction();
   Function::const_arg_iterator FuncArg = Func.arg_begin();
 
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -1384,7 +1384,7 @@
 
   // Reserve space for the linkage area on the stack.
   unsigned LinkageSize = PPCSubTarget->getFrameLowering()->getLinkageSize();
-  CCInfo.AllocateStack(LinkageSize, 8);
+  CCInfo.AllocateStack(LinkageSize, Align(8));
 
   CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);
 
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -3638,7 +3638,7 @@
   // Potential tail calls could cause overwriting of argument stack slots.
   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                        (CallConv == CallingConv::Fast));
-  unsigned PtrByteSize = 4;
+  const Align PtrAlign(4);
 
   // Assign locations to all of the incoming arguments.
   SmallVector<CCValAssign, 16> ArgLocs;
@@ -3647,7 +3647,7 @@
 
   // Reserve space for the linkage area on the stack.
   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
-  CCInfo.AllocateStack(LinkageSize, PtrByteSize);
+  CCInfo.AllocateStack(LinkageSize, PtrAlign);
 
   if (useSoftFloat())
     CCInfo.PreAnalyzeFormalArguments(Ins);
@@ -3756,7 +3756,7 @@
                         ByValArgLocs, *DAG.getContext());
 
     // Reserve stack space for the allocations in CCInfo.
-    CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
+    CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
 
     CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
 
@@ -5705,7 +5705,7 @@
           CallConv == CallingConv::Cold ||
           CallConv == CallingConv::Fast) && "Unknown calling convention!");
-  unsigned PtrByteSize = 4;
+  const Align PtrAlign(4);
 
   MachineFunction &MF = DAG.getMachineFunction();
 
@@ -5728,7 +5728,7 @@
 
   // Reserve space for the linkage area on the stack.
   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
-                       PtrByteSize);
+                       PtrAlign);
 
   if (useSoftFloat())
     CCInfo.PreAnalyzeCallOperands(Outs);
@@ -5770,7 +5770,7 @@
     CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs,
                         *DAG.getContext());
 
     // Reserve stack space for the allocations in CCInfo.
-    CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
+    CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
 
     CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
@@ -6985,7 +6985,7 @@
   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
       State.getMachineFunction().getSubtarget());
   const bool IsPPC64 = Subtarget.isPPC64();
-  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
+  const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
 
   assert((!ValVT.isInteger() ||
@@ -7009,7 +7009,7 @@
                                  PPC::X7, PPC::X8, PPC::X9, PPC::X10};
 
   if (ArgFlags.isByVal()) {
-    if (ArgFlags.getNonZeroByValAlign() > PtrByteSize)
+    if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
      report_fatal_error("Pass-by-value arguments with alignment greater than "
                         "register width are not supported.");
 
@@ -7024,10 +7024,10 @@
       return false;
     }
 
-    const unsigned StackSize = alignTo(ByValSize, PtrByteSize);
-    unsigned Offset = State.AllocateStack(StackSize, PtrByteSize);
+    const unsigned StackSize = alignTo(ByValSize, PtrAlign);
+    unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
     for (const unsigned E = Offset + StackSize; Offset < E;
-         Offset += PtrByteSize) {
+         Offset += PtrAlign.value()) {
       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
       else {
@@ -7050,7 +7050,7 @@
     LLVM_FALLTHROUGH;
   case MVT::i1:
   case MVT::i32: {
-    const unsigned Offset = State.AllocateStack(PtrByteSize, PtrByteSize);
+    const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
     // AIX integer arguments are always passed in register width.
     if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
@@ -7068,13 +7068,14 @@
     const unsigned StoreSize = LocVT.getStoreSize();
     // Floats are always 4-byte aligned in the PSA on AIX.
     // This includes f64 in 64-bit mode for ABI compatibility.
-    const unsigned Offset = State.AllocateStack(IsPPC64 ? 8 : StoreSize, 4);
+    const unsigned Offset =
+        State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
     unsigned FReg = State.AllocateReg(FPR);
     if (FReg)
       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
 
     // Reserve and initialize GPRs or initialize the PSA as required.
-    for (unsigned I = 0; I < StoreSize; I += PtrByteSize) {
+    for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
         assert(FReg && "An FPR should be available when a GPR is reserved.");
         if (State.isVarArg()) {
@@ -7191,7 +7192,7 @@
   const EVT PtrVT = getPointerTy(MF.getDataLayout());
   // Reserve space for the linkage area on the stack.
   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
-  CCInfo.AllocateStack(LinkageSize, PtrByteSize);
+  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
 
   SmallVector<SDValue, 8> MemOps;
@@ -7413,7 +7414,7 @@
   const bool IsPPC64 = Subtarget.isPPC64();
   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
-  CCInfo.AllocateStack(LinkageSize, PtrByteSize);
+  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
 
   // The prolog code of the callee may store up to 8 GPR argument registers to
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1461,11 +1461,11 @@
 
     unsigned StackAlign = std::max(XLenInBytes, ArgFlags1.getOrigAlign());
     State.addLoc(
         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
-                            State.AllocateStack(XLenInBytes, StackAlign),
+                            State.AllocateStack(XLenInBytes, Align(StackAlign)),
                             VA1.getLocVT(), CCValAssign::Full));
     State.addLoc(CCValAssign::getMem(
-        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
-        CCValAssign::Full));
+        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
+        LocVT2, CCValAssign::Full));
     return false;
   }
@@ -1476,8 +1476,8 @@
   } else {
     // The second half is passed via the stack, without additional alignment.
     State.addLoc(CCValAssign::getMem(
-        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, XLenInBytes), LocVT2,
-        CCValAssign::Full));
+        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
+        LocVT2, CCValAssign::Full));
   }
 
   return false;
@@ -1572,13 +1572,13 @@
     Register Reg = State.AllocateReg(ArgGPRs);
     LocVT = MVT::i32;
     if (!Reg) {
-      unsigned StackOffset = State.AllocateStack(8, 8);
+      unsigned StackOffset = State.AllocateStack(8, Align(8));
       State.addLoc(
           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
       return false;
     }
     if (!State.AllocateReg(ArgGPRs))
-      State.AllocateStack(4, 4);
+      State.AllocateStack(4, Align(4));
     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
     return false;
   }
@@ -1618,7 +1618,8 @@
     Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s);
   else
     Reg = State.AllocateReg(ArgGPRs);
-  unsigned StackOffset = Reg ? 0 : State.AllocateStack(XLen / 8, XLen / 8);
+  unsigned StackOffset =
+      Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
 
   // If we reach this point and PendingLocs is non-empty, we must be at the
   // end of a split argument that must be passed indirectly.
@@ -1887,13 +1888,13 @@
   }
 
   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
-    unsigned Offset4 = State.AllocateStack(4, 4);
+    unsigned Offset4 = State.AllocateStack(4, Align(4));
     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
     return false;
   }
 
   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
-    unsigned Offset5 = State.AllocateStack(8, 8);
+    unsigned Offset5 = State.AllocateStack(8, Align(8));
     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
     return false;
   }
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -63,9 +63,8 @@
     State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
   } else {
     // Assign whole thing in stack.
-    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
-                                           State.AllocateStack(8,4),
-                                           LocVT, LocInfo));
+    State.addLoc(CCValAssign::getCustomMem(
+        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
     return true;
   }
 
@@ -73,9 +72,8 @@
   if (unsigned Reg = State.AllocateReg(RegList))
     State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
   else
-    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
-                                           State.AllocateStack(4,4),
-                                           LocVT, LocInfo));
+    State.addLoc(CCValAssign::getCustomMem(
+        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
   return true;
 }
@@ -112,7 +110,7 @@
 
   // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
   unsigned size = (LocVT == MVT::f128) ? 16 : 8;
-  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
+  Align alignment = (LocVT == MVT::f128) ? Align(16) : Align(8);
   unsigned Offset = State.AllocateStack(size, alignment);
   unsigned Reg = 0;
 
@@ -152,7 +150,7 @@
                                      MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                      ISD::ArgFlagsTy &ArgFlags, CCState &State) {
   assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
-  unsigned Offset = State.AllocateStack(4, 4);
+  unsigned Offset = State.AllocateStack(4, Align(4));
 
   if (LocVT == MVT::f32 && Offset < 16*8) {
     // Promote floats to %f0-%f31.
diff --git a/llvm/lib/Target/SystemZ/SystemZCallingConv.h b/llvm/lib/Target/SystemZ/SystemZCallingConv.h
--- a/llvm/lib/Target/SystemZ/SystemZCallingConv.h
+++ b/llvm/lib/Target/SystemZ/SystemZCallingConv.h
@@ -108,7 +108,7 @@
   // the location (register or stack slot) for the indirect pointer.
   // (This duplicates the usual i64 calling convention rules.)
   unsigned Reg = State.AllocateReg(SystemZ::ArgGPRs);
-  unsigned Offset = Reg ? 0 : State.AllocateStack(8, 8);
+  unsigned Offset = Reg ? 0 : State.AllocateStack(8, Align(8));
 
   // Use that same location for all the pending parts.
   for (auto &It : PendingMembers) {
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -49,9 +49,9 @@
   //     | empty| float|
   //     +------+------+
   // Use align=8 for dummy area to align the beginning of these 2 area.
-  State.AllocateStack(4, 8); // for empty area
+  State.AllocateStack(4, Align(8)); // for empty area
   // Use align=4 for value to place it at just after the dummy area.
-  unsigned Offset = State.AllocateStack(4, 4); // for float value area
+  unsigned Offset = State.AllocateStack(4, Align(4)); // for float value area
   State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
   return true;
 }
@@ -147,7 +147,7 @@
   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                  *DAG.getContext());
   // Allocate the preserved area first.
-  CCInfo.AllocateStack(ArgsPreserved, 8);
+  CCInfo.AllocateStack(ArgsPreserved, Align(8));
   // We already allocated the preserved area, so the stack offset computed
   // by CC_VE would be correct now.
   CCInfo.AnalyzeFormalArguments(Ins, CC_VE);
@@ -267,7 +267,7 @@
   CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
                  *DAG.getContext());
   // Allocate the preserved area first.
-  CCInfo.AllocateStack(ArgsPreserved, 8);
+  CCInfo.AllocateStack(ArgsPreserved, Align(8));
   // We already allocated the preserved area, so the stack offset computed
   // by CC_VE would be correct now.
   CCInfo.AnalyzeCallOperands(CLI.Outs, CC_VE);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -840,10 +840,10 @@
       EVT VT = Arg.getValueType();
       assert(VT != MVT::iPTR && "Legalized args should be concrete");
       Type *Ty = VT.getTypeForEVT(*DAG.getContext());
-      unsigned Align = std::max(Out.Flags.getOrigAlign(),
-                                Layout.getABITypeAlignment(Ty));
-      unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
-                                             Align);
+      Align Alignment =
+          std::max(Align(Out.Flags.getOrigAlign()), Layout.getABITypeAlign(Ty));
+      unsigned Offset =
+          CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
       CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                         Offset, VT.getSimpleVT(),
                                         CCValAssign::Full));
diff --git a/llvm/lib/Target/X86/X86CallingConv.cpp b/llvm/lib/Target/X86/X86CallingConv.cpp
--- a/llvm/lib/Target/X86/X86CallingConv.cpp
+++ b/llvm/lib/Target/X86/X86CallingConv.cpp
@@ -166,7 +166,7 @@
       State.getMachineFunction().getSubtarget().getRegisterInfo();
 
   if (TRI->regsOverlap(Reg, X86::XMM4) || TRI->regsOverlap(Reg, X86::XMM5))
-    State.AllocateStack(8, 8);
+    State.AllocateStack(8, Align(8));
 
   if (!ArgFlags.isHva()) {
     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -281,7 +281,7 @@
     if (UseRegs)
       It.convertToReg(State.AllocateReg(RegList[FirstFree++]));
     else
-      It.convertToMem(State.AllocateStack(4, 4));
+      It.convertToMem(State.AllocateStack(4, Align(4)));
     State.addLoc(It);
   }
 
@@ -305,7 +305,7 @@
   if (ArgCount == 1 && ValNo == 0) {
     // If we have one argument, the argument is five stack slots big, at fixed
     // offset zero.
-    Offset = State.AllocateStack(5 * SlotSize, 4);
+    Offset = State.AllocateStack(5 * SlotSize, Align(4));
   } else if (ArgCount == 2 && ValNo == 0) {
     // If we have two arguments, the stack slot is *after* the error code
     // argument. Pretend it doesn't consume stack space, and account for it when
@@ -316,7 +316,7 @@
     // appears first on the stack, and is then followed by the five slot
     // interrupt struct.
     Offset = 0;
-    (void)State.AllocateStack(6 * SlotSize, 4);
+    (void)State.AllocateStack(6 * SlotSize, Align(4));
   } else {
     report_fatal_error("unsupported x86 interrupt prototype");
   }
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -3304,7 +3304,7 @@
 
   // Allocate shadow area for Win64
   if (IsWin64)
-    CCInfo.AllocateStack(32, 8);
+    CCInfo.AllocateStack(32, Align(8));
 
   CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3559,7 +3559,7 @@
 
   // Allocate shadow area for Win64.
   if (IsWin64)
-    CCInfo.AllocateStack(32, 8);
+    CCInfo.AllocateStack(32, Align(8));
 
   CCInfo.AnalyzeArguments(Ins, CC_X86);
 
@@ -3898,7 +3898,7 @@
 
   // Allocate shadow area for Win64.
   if (IsWin64)
-    CCInfo.AllocateStack(32, 8);
+    CCInfo.AllocateStack(32, Align(8));
 
   CCInfo.AnalyzeArguments(Outs, CC_X86);
 
@@ -4631,7 +4631,7 @@
 
     // Allocate shadow area for Win64
     if (IsCalleeWin64)
-      CCInfo.AllocateStack(32, 8);
+      CCInfo.AllocateStack(32, Align(8));
 
     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
     StackArgsSize = CCInfo.getNextStackOffset();
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1119,7 +1119,7 @@
 
   // The ABI dictates there should be one stack slot available to the callee
   // on function entry (for saving lr).
-  CCInfo.AllocateStack(4, 4);
+  CCInfo.AllocateStack(4, Align(4));
 
   CCInfo.AnalyzeCallOperands(Outs, CC_XCore);
 
@@ -1127,7 +1127,7 @@
   // Analyze return values to determine the number of bytes of stack required.
   CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                     *DAG.getContext());
-  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
+  RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), Align(4));
   RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);
 
   // Get a count of how many bytes are to be pushed on the stack.
@@ -1455,7 +1455,7 @@
 
   // Analyze return values.
   if (!isVarArg)
-    CCInfo.AllocateStack(XFI->getReturnStackOffset(), 4);
+    CCInfo.AllocateStack(XFI->getReturnStackOffset(), Align(4));
 
   CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
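
Note for readers unfamiliar with llvm::Align, the type these hunks migrate to: every AllocateStack(Size, N) call above now passes Align(N), a wrapper from llvm/Support/Alignment.h around a power-of-two byte alignment, so Align(1) means "no alignment requirement" rather than a one-byte allocation. Below is a minimal sketch of the idioms the patch relies on (construction, ordering, value(), and alignTo); it is not part of the patch, and the demo function is hypothetical.

#include "llvm/Support/Alignment.h"
#include <algorithm>
#include <cassert>

using llvm::Align;
using llvm::alignTo;

// Hypothetical standalone illustration of the idioms used in the hunks.
void demo() {
  // Construction asserts (in debug builds) that the value is a power of two.
  const Align PtrAlign(8);

  // alignTo rounds a size up to the alignment, e.g. 13 -> 16, as in
  // "alignTo(ByValSize, PtrAlign)" in the PPC AIX byval hunk.
  assert(alignTo(13, PtrAlign) == 16);

  // Align is totally ordered, so comparisons and std::max work, as in the
  // WebAssembly hunk and the PPC "getNonZeroByValAlign() > PtrAlign" check.
  assert(std::max(Align(4), PtrAlign) == PtrAlign);

  // value() recovers the raw byte count where plain arithmetic is needed,
  // e.g. "Offset += PtrAlign.value()" in the PPC byval register loop.
  unsigned Offset = 0;
  Offset += PtrAlign.value(); // Offset == 8
  (void)Offset;
}

One behavioral consequence worth noting: because Align cannot represent zero or non-power-of-two values, call sites that previously passed a computed byte size as the alignment (for example State.AllocateStack(XLen / 8, Align(XLen / 8)) in the RISCV hunks) now assert in debug builds that the value really is a power of two.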