diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -130,7 +130,7 @@
                                            const Function &F) {
   if (F.hasFnAttribute(Attribute::StackAlignment))
     return F.getFnStackAlignment();
-  return STI->getFrameLowering()->getStackAlignment();
+  return STI->getFrameLowering()->getStackAlign().value();
 }
 
 MachineFunction::MachineFunction(const Function &F,
diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
--- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -438,14 +438,14 @@
     unsigned Size = RegInfo->getSpillSize(*RC);
     if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
       // Nope, just spill it anywhere convenient.
-      unsigned Align = RegInfo->getSpillAlignment(*RC);
-      unsigned StackAlign = TFI->getStackAlignment();
+      Align Alignment(RegInfo->getSpillAlignment(*RC));
+      Align StackAlign = TFI->getStackAlign();
       // We may not be able to satisfy the desired alignment specification of
       // the TargetRegisterClass if the stack alignment is smaller. Use the
       // min.
-      Align = std::min(Align, StackAlign);
-      FrameIdx = MFI.CreateStackObject(Size, Align, true);
+      Alignment = std::min(Alignment, StackAlign);
+      FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
       if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
       if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
     } else {
diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -85,7 +85,6 @@
   TLI = MF->getSubtarget().getTargetLowering();
   RegInfo = &MF->getRegInfo();
   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
-  unsigned StackAlign = TFI->getStackAlignment();
   DA = DAG->getDivergenceAnalysis();
 
   // Check whether the function can return without sret-demotion.
@@ -130,19 +129,19 @@
   // Initialize the mapping of values to registers.  This is only set up for
   // instruction values that are used outside of the block that defines
   // them.
+  Align StackAlign = TFI->getStackAlign();
   for (const BasicBlock &BB : *Fn) {
     for (const Instruction &I : BB) {
       if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
         Type *Ty = AI->getAllocatedType();
-        unsigned Align =
-            std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
-                     AI->getAlignment());
+        Align Alignment =
+            max(MF->getDataLayout().getPrefTypeAlign(Ty), AI->getAlign());
 
         // Static allocas can be folded into the initial stack frame
         // adjustment. For targets that don't realign the stack, don't
         // do this if there is an extra alignment requirement.
         if (AI->isStaticAlloca() &&
-            (TFI->isStackRealignable() || (Align <= StackAlign))) {
+            (TFI->isStackRealignable() || (Alignment <= StackAlign))) {
           const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
           uint64_t TySize =
               MF->getDataLayout().getTypeAllocSize(Ty).getKnownMinSize();
@@ -154,10 +153,10 @@
           if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
             FrameIndex = MF->getFrameInfo().CreateFixedObject(
                 TySize, 0, /*IsImmutable=*/false, /*isAliased=*/true);
-            MF->getFrameInfo().setObjectAlignment(FrameIndex, Align);
+            MF->getFrameInfo().setObjectAlignment(FrameIndex, Alignment);
           } else {
-            FrameIndex =
-                MF->getFrameInfo().CreateStackObject(TySize, Align, false, AI);
+            FrameIndex = MF->getFrameInfo().CreateStackObject(TySize, Alignment,
+                                                              false, AI);
           }
 
           // Scalable vectors may need a special StackID to distinguish
@@ -176,10 +175,9 @@
           // FIXME: Overaligned static allocas should be grouped into
           // a single dynamic allocation instead of using a separate
           // stack allocation for each one.
-          if (Align <= StackAlign)
-            Align = 0;
           // Inform the Frame Information that we have variable-sized objects.
-          MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, AI);
+          MF->getFrameInfo().CreateVariableSizedObject(
+              Alignment <= StackAlign ? 0 : Alignment.value(), AI);
         }
       }
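
[Note — not part of the patch] The hunks above rely on llvm::Align from llvm/Support/Alignment.h: a wrapper that can only hold a valid power-of-two byte alignment, is totally ordered, and converts back to the legacy unsigned via value(). A minimal standalone sketch of the semantics the std::min and <= uses above depend on (the function name is illustrative only):

    #include "llvm/Support/Alignment.h"
    #include <algorithm>
    #include <cassert>

    void alignSemantics() {
      llvm::Align A(16), B(4);
      assert(A.value() == 16);     // raw byte count, as in getStackAlign().value()
      assert(B <= A);              // ordered by value, so "Alignment <= StackAlign"
                                   // keeps its old unsigned meaning
      assert(std::min(A, B) == B); // mirrors the PrologEpilogInserter hunk
    }
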
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4010,8 +4010,7 @@
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   auto &DL = DAG.getDataLayout();
   uint64_t TySize = DL.getTypeAllocSize(Ty);
-  unsigned Align =
-      std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
+  Align Alignment = max(DL.getPrefTypeAlign(Ty), I.getAlign());
 
   SDValue AllocSize = getValue(I.getArraySize());
 
@@ -4026,25 +4025,26 @@
   // Handle alignment.  If the requested alignment is less than or equal to
   // the stack alignment, ignore it.  If the size is greater than or equal to
   // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
-  unsigned StackAlign =
-      DAG.getSubtarget().getFrameLowering()->getStackAlignment();
-  if (Align <= StackAlign)
-    Align = 0;
+  Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
 
   // Round the size of the allocation up to the stack alignment size
   // by add SA-1 to the size. This doesn't overflow because we're computing
   // an address inside an alloca.
   SDNodeFlags Flags;
   Flags.setNoUnsignedWrap(true);
-  AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
-                          DAG.getConstant(StackAlign - 1, dl, IntPtr), Flags);
-
-  // Mask out the low bits for alignment purposes.
   AllocSize =
-      DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
-                  DAG.getConstant(~(uint64_t)(StackAlign - 1), dl, IntPtr));
+      DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
+                  DAG.getConstant(StackAlign.value() - 1, dl, IntPtr), Flags);
 
-  SDValue Ops[] = {getRoot(), AllocSize, DAG.getConstant(Align, dl, IntPtr)};
+  // Mask out the low bits for alignment purposes.
+  AllocSize = DAG.getNode(
+      ISD::AND, dl, AllocSize.getValueType(), AllocSize,
+      DAG.getConstant(~(uint64_t)(StackAlign.value() - 1), dl, IntPtr));
+
+  SDValue Ops[] = {
+      getRoot(), AllocSize,
+      DAG.getConstant(Alignment <= StackAlign ? 0 : Alignment.value(), dl,
+                      IntPtr)};
   SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
   SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
   setValue(&I, DSA);
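
[Note — not part of the patch] The ISD::ADD/ISD::AND pair built above encodes the standard round-up-then-mask idiom for power-of-two alignments. A standalone sketch of the integer arithmetic the two DAG nodes compute (helper name is hypothetical):

    #include <cassert>
    #include <cstdint>

    // Adding StackAlign-1 and then clearing the low bits rounds Size up to
    // the next multiple of StackAlign (StackAlign must be a power of two).
    uint64_t roundUpToStackAlign(uint64_t Size, uint64_t StackAlign) {
      return (Size + StackAlign - 1) & ~(StackAlign - 1);
    }

    int main() {
      assert(roundUpToStackAlign(20, 16) == 32); // rounds up
      assert(roundUpToStackAlign(32, 16) == 32); // already aligned
    }
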
diff --git a/llvm/lib/Target/AVR/AVRFrameLowering.cpp b/llvm/lib/Target/AVR/AVRFrameLowering.cpp
--- a/llvm/lib/Target/AVR/AVRFrameLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRFrameLowering.cpp
@@ -378,7 +378,7 @@
   // For adjcallstackdown we convert it into an 'adiw reg, <amt>' handling
   // the read and write of SP in I/O space.
   if (Amount != 0) {
-    assert(getStackAlignment() == 1 && "Unsupported stack alignment");
+    assert(getStackAlign() == Align(1) && "Unsupported stack alignment");
 
     if (Opcode == TII.getCallFrameSetupOpcode()) {
       fixStackStores(MBB, MI, TII, true);
diff --git a/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp b/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
--- a/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430FrameLowering.cpp
@@ -223,8 +223,6 @@
     MachineBasicBlock::iterator I) const {
   const MSP430InstrInfo &TII =
       *static_cast<const MSP430InstrInfo *>(MF.getSubtarget().getInstrInfo());
-  unsigned StackAlign = getStackAlignment();
-
   if (!hasReservedCallFrame(MF)) {
     // If the stack pointer can be changed after prologue, turn the
     // adjcallstackup instruction into a 'sub SP, <amt>' and the
@@ -236,7 +234,7 @@
       // We need to keep the stack aligned properly.  To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
       // alignment boundary.
-      Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;
+      Amount = alignTo(Amount, getStackAlign());
 
       MachineInstr *New = nullptr;
       if (Old.getOpcode() == TII.getCallFrameSetupOpcode()) {
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -179,8 +179,9 @@
       MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
 
   const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
-  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
-  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align);
+  Align Alignment = commonAlignment(TFL->getStackAlign(), Offset);
+  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size,
+                                Alignment.value());
 
   return MIRBuilder.buildFrameIndex(LLT::pointer(0, 32), FI).getReg(0);
 }
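
[Note — not part of the patch] commonAlignment is the Align-typed replacement for the old MinAlign on raw unsigneds: the largest power of two that divides both the base alignment and the offset. A sketch assuming only llvm/Support/Alignment.h (demo name is illustrative):

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    void commonAlignmentDemo() {
      llvm::Align StackAlign(8);
      // Offset 16 is a multiple of 8, so the full stack alignment survives.
      assert(llvm::commonAlignment(StackAlign, 16) == llvm::Align(8));
      // Offset 4 caps the provable alignment at 4, as MinAlign(8, 4) did.
      assert(llvm::commonAlignment(StackAlign, 4) == llvm::Align(4));
    }
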
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -498,9 +498,9 @@
   // Get stack alignments.
   const PPCFrameLowering *TFI = getFrameLowering(MF);
-  unsigned TargetAlign = TFI->getStackAlignment();
-  unsigned MaxAlign = MFI.getMaxAlign().value();
-  assert((maxCallFrameSize & (MaxAlign-1)) == 0 &&
+  Align TargetAlign = TFI->getStackAlign();
+  Align MaxAlign = MFI.getMaxAlign();
+  assert(isAligned(MaxAlign, maxCallFrameSize) &&
          "Maximum call-frame size not sufficiently aligned");
 
   // Determine the previous frame's address.  If FrameSize can't be
@@ -545,7 +545,7 @@
     // Unfortunately, there is no andi, only andi., and we can't insert that
     // here because we might clobber cr0 while it is live.
     BuildMI(MBB, II, dl, TII.get(PPC::LI8), NegSizeReg)
-        .addImm(~(MaxAlign-1));
+        .addImm(~(MaxAlign.value() - 1));
 
     unsigned NegSizeReg1 = NegSizeReg;
     NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
@@ -570,7 +570,7 @@
     // Unfortunately, there is no andi, only andi., and we can't insert that
     // here because we might clobber cr0 while it is live.
     BuildMI(MBB, II, dl, TII.get(PPC::LI), NegSizeReg)
-        .addImm(~(MaxAlign-1));
+        .addImm(~(MaxAlign.value() - 1));
     unsigned NegSizeReg1 = NegSizeReg;
     NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -631,7 +631,6 @@
   const MachineFrameInfo &MFI = MF.getFrameInfo();
   const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
   uint64_t StackSize = MFI.getStackSize();
-  uint64_t StackAlign = getStackAlignment();
 
   // Disable SplitSPAdjust if save-restore libcall used. The callee saved
   // registers will be pushed by the save-restore libcalls, so we don't have to
@@ -648,7 +647,7 @@
     // load/store instruction and we have to stick with the stack alignment.
     // 2048 is 16-byte alignment. The stack alignment for RV32 and RV64 is 16,
     // for RV32E is 4. So (2048 - StackAlign) will satisfy the stack alignment.
-    return 2048 - StackAlign;
+    return 2048 - getStackAlign().value();
   }
   return 0;
 }
diff --git a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
--- a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -198,7 +198,7 @@
   if (CannotReserveFrame)
     return true;
 
-  unsigned StackAlign = TFL->getStackAlignment();
+  Align StackAlign = TFL->getStackAlign();
 
   int64_t Advantage = 0;
   for (auto CC : CallSeqVector) {
@@ -221,7 +221,7 @@
       // We'll need a add after the call.
       Advantage -= 3;
       // If we have to realign the stack, we'll also need a sub before
-      if (CC.ExpectedDist % StackAlign)
+      if (!isAligned(StackAlign, CC.ExpectedDist))
         Advantage -= 3;
       // Now, for each push, we save ~3 bytes. For small constants, we actually,
       // save more (up to 5 bytes), but 3 should be a good approximation.
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -3938,10 +3938,8 @@
   const X86InstrInfo &XII = (const X86InstrInfo &)TII;
 
   unsigned Size = DL.getTypeAllocSize(LI->getType());
-  unsigned Alignment = LI->getAlignment();
-
-  if (Alignment == 0) // Ensure that codegen never sees alignment 0
-    Alignment = DL.getABITypeAlignment(LI->getType());
+  Align Alignment =
+      DL.getValueOrABITypeAlignment(LI->getAlign(), LI->getType());
 
   SmallVector<MachineOperand, 8> AddrOps;
   AM.getFullAddress(AddrOps);
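
[Note — not part of the patch] isAligned(A, N) replaces hand-written modulo/mask tests such as the old (maxCallFrameSize & (MaxAlign-1)) == 0 assert in PPCRegisterInfo and the CC.ExpectedDist % StackAlign check in X86CallFrameOptimization above. A standalone sketch (demo name is illustrative):

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    void isAlignedDemo() {
      llvm::Align StackAlign(16);
      assert(llvm::isAligned(StackAlign, 32));  // 32 % 16 == 0
      assert(!llvm::isAligned(StackAlign, 20)); // 20 % 16 != 0
    }
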
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp
--- a/llvm/lib/Target/X86/X86FrameLowering.cpp
+++ b/llvm/lib/Target/X86/X86FrameLowering.cpp
@@ -1764,7 +1764,7 @@
   // RBP is not included in the callee saved register block. After pushing RBP,
   // everything is 16 byte aligned. Everything we allocate before an outgoing
   // call must also be 16 byte aligned.
-  unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlignment());
+  unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlign());
   // Subtract out the size of the callee saved registers. This is how much stack
   // each funclet will allocate.
   return FrameSizeMinusRBP + XMMSize - CSSize;
@@ -2051,7 +2051,8 @@
     return getFrameIndexReference(MF, FI, FrameReg);
 
   FrameReg = TRI->getStackRegister();
-  return alignDown(MFI.getMaxCallFrameSize(), getStackAlignment()) + it->second;
+  return alignDown(MFI.getMaxCallFrameSize(), getStackAlign().value()) +
+         it->second;
 }
 
 int X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF,
@@ -2996,8 +2997,7 @@
   // We need to keep the stack aligned properly.  To do this, we round the
   // amount of space needed for the outgoing arguments up to the next
   // alignment boundary.
-  unsigned StackAlign = getStackAlignment();
-  Amount = alignTo(Amount, StackAlign);
+  Amount = alignTo(Amount, getStackAlign());
 
   const Function &F = MF.getFunction();
   bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4424,7 +4424,7 @@
 unsigned
 X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
                                                SelectionDAG &DAG) const {
-  const Align StackAlignment(Subtarget.getFrameLowering()->getStackAlignment());
+  const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
   const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
   assert(StackSize % SlotSize == 0 &&
          "StackSize must be a multiple of SlotSize");
@@ -23320,7 +23320,7 @@
          " not tell us which reg is the stack pointer!");
 
   const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
-  const Align StackAlign(TFI.getStackAlignment());
+  const Align StackAlign = TFI.getStackAlign();
 
   if (hasInlineStackProbe(MF)) {
     MachineRegisterInfo &MRI = MF.getRegInfo();
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -474,7 +474,7 @@
                                       unsigned OpNum,
                                       ArrayRef<MachineOperand> MOs,
                                       MachineBasicBlock::iterator InsertPt,
-                                      unsigned Size, unsigned Alignment,
+                                      unsigned Size, Align Alignment,
                                       bool AllowCommute) const;
 
   bool isHighLatencyDef(int opc) const override;
@@ -594,7 +594,7 @@
                                         unsigned OpNum,
                                         ArrayRef<MachineOperand> MOs,
                                         MachineBasicBlock::iterator InsertPt,
-                                        unsigned Size, unsigned Align) const;
+                                        unsigned Size, Align Alignment) const;
 
   /// isFrameOperand - Return true and the FrameIndex if the specified
   /// operand and follow operands form a reference to the stack frame.
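
[Note — not part of the patch] alignTo and alignDown round an integer up or down to a multiple of an alignment; alignTo(Amount, getStackAlign()) is what replaced the manual (Amount+StackAlign-1)/StackAlign*StackAlign arithmetic in the MSP430 and X86 frame-lowering hunks. alignDown still takes a raw uint64_t, hence the .value() in the X86FrameLowering hunk above. Sketch (demo name is illustrative):

    #include "llvm/Support/Alignment.h"
    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    void alignToDemo() {
      llvm::Align StackAlign(16);
      assert(llvm::alignTo(20, StackAlign) == 32);           // round up
      assert(llvm::alignDown(20, StackAlign.value()) == 16); // round down
    }
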
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -625,8 +625,7 @@
   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
 
   if (isFrameInstr(MI)) {
-    unsigned StackAlign = TFI->getStackAlignment();
-    int SPAdj = alignTo(getFrameSize(MI), StackAlign);
+    int SPAdj = alignTo(getFrameSize(MI), TFI->getStackAlign());
     SPAdj -= getFrameAdjustment(MI);
     if (!isFrameSetup(MI))
       SPAdj = -SPAdj;
@@ -3737,7 +3736,7 @@
          "Stack slot too small for store");
   unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
   bool isAligned =
-      (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
+      (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
       RI.canRealignStack(MF);
   unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
   addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc)), FrameIdx)
@@ -3752,7 +3751,7 @@
   const MachineFunction &MF = *MBB.getParent();
   unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
   bool isAligned =
-      (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
+      (Subtarget.getFrameLowering()->getStackAlign() >= Alignment) ||
       RI.canRealignStack(MF);
   unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
   addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), FrameIdx);
@@ -5211,7 +5210,7 @@
 MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
     MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
     ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
-    unsigned Size, unsigned Align) const {
+    unsigned Size, Align Alignment) const {
   switch (MI.getOpcode()) {
   case X86::INSERTPSrr:
   case X86::VINSERTPSrr:
@@ -5227,7 +5226,7 @@
     const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
     const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
     unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
-    if ((Size == 0 || Size >= 16) && RCSize >= 16 && 4 <= Align) {
+    if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(4)) {
       int PtrOffset = SrcIdx * 4;
       unsigned NewImm = (DstIdx << 4) | ZMask;
       unsigned NewOpCode =
@@ -5251,7 +5250,7 @@
     const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
     const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
     unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
-    if ((Size == 0 || Size >= 16) && RCSize >= 16 && 8 <= Align) {
+    if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment >= Align(8)) {
       unsigned NewOpCode =
           (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
           (MI.getOpcode() == X86::VMOVHLPSrr)  ? X86::VMOVLPSrm :
@@ -5270,7 +5269,7 @@
     const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
     const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
     unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
-    if ((Size == 0 || Size >= 16) && RCSize >= 16 && Align < 16) {
+    if ((Size == 0 || Size >= 16) && RCSize >= 16 && Alignment < Align(16)) {
       MachineInstr *NewMI =
           FuseInst(MF, X86::MOVHPDrm, OpNum, MOs, InsertPt, MI, *this);
       return NewMI;
@@ -5302,11 +5301,10 @@
   return VRegDef && VRegDef->isImplicitDef();
 }
 
-
 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
     ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
-    unsigned Size, unsigned Align, bool AllowCommute) const {
+    unsigned Size, Align Alignment, bool AllowCommute) const {
   bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
   bool isTwoAddrFold = false;
@@ -5346,8 +5344,8 @@
   MachineInstr *NewMI = nullptr;
 
   // Attempt to fold any custom cases we have.
-  if (MachineInstr *CustomMI =
-          foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt, Size, Align))
+  if (MachineInstr *CustomMI = foldMemoryOperandCustom(
+          MF, MI, OpNum, MOs, InsertPt, Size, Alignment))
     return CustomMI;
 
   const X86MemoryFoldTableEntry *I = nullptr;
@@ -5374,9 +5372,9 @@
   if (I != nullptr) {
     unsigned Opcode = I->DstOp;
-    unsigned MinAlign = (I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
-    MinAlign = MinAlign ? 1 << (MinAlign - 1) : 0;
-    if (Align < MinAlign)
+    MaybeAlign MinAlign =
+        decodeMaybeAlign((I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT);
+    if (MinAlign && Alignment < *MinAlign)
       return nullptr;
     bool NarrowToMOV32rm = false;
     if (Size) {
@@ -5451,8 +5449,8 @@
       }
 
       // Attempt to fold with the commuted version of the instruction.
-      NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt,
-                                    Size, Align, /*AllowCommute=*/false);
+      NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt, Size,
+                                    Alignment, /*AllowCommute=*/false);
       if (NewMI)
         return NewMI;
@@ -5506,12 +5504,12 @@
   const MachineFrameInfo &MFI = MF.getFrameInfo();
   unsigned Size = MFI.getObjectSize(FrameIndex);
-  unsigned Alignment = MFI.getObjectAlignment(FrameIndex);
+  Align Alignment = MFI.getObjectAlign(FrameIndex);
   // If the function stack isn't realigned we don't want to fold instructions
   // that need increased alignment.
   if (!RI.needsStackRealignment(MF))
     Alignment =
-        std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment());
+        std::min(Alignment, Subtarget.getFrameLowering()->getStackAlign());
   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
     unsigned NewOpc = 0;
     unsigned RCSize = 0;
@@ -5811,36 +5809,36 @@
     return nullptr;
 
   // Determine the alignment of the load.
-  unsigned Alignment = 0;
+  Align Alignment;
   if (LoadMI.hasOneMemOperand())
-    Alignment = (*LoadMI.memoperands_begin())->getAlignment();
+    Alignment = Align((*LoadMI.memoperands_begin())->getAlignment());
   else
     switch (LoadMI.getOpcode()) {
     case X86::AVX512_512_SET0:
     case X86::AVX512_512_SETALLONES:
-      Alignment = 64;
+      Alignment = Align(64);
       break;
     case X86::AVX2_SETALLONES:
     case X86::AVX1_SETALLONES:
     case X86::AVX_SET0:
     case X86::AVX512_256_SET0:
-      Alignment = 32;
+      Alignment = Align(32);
       break;
     case X86::V_SET0:
    case X86::V_SETALLONES:
     case X86::AVX512_128_SET0:
     case X86::FsFLD0F128:
     case X86::AVX512_FsFLD0F128:
-      Alignment = 16;
+      Alignment = Align(16);
       break;
     case X86::MMX_SET0:
     case X86::FsFLD0SD:
     case X86::AVX512_FsFLD0SD:
-      Alignment = 8;
+      Alignment = Align(8);
       break;
     case X86::FsFLD0SS:
     case X86::AVX512_FsFLD0SS:
-      Alignment = 4;
+      Alignment = Align(4);
       break;
     default:
       return nullptr;
@@ -5929,7 +5927,7 @@
                       Opc == X86::AVX1_SETALLONES);
     const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty)
                                   : Constant::getNullValue(Ty);
-    unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
+    unsigned CPI = MCP.getConstantPoolIndex(C, Alignment.value());
 
     // Create operands to load from the constant pool entry.
     MOs.push_back(MachineOperand::CreateReg(PICBase, false));
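
[Note — not part of the patch] The fold-table hunk above swaps a hand-decoded log2 field for decodeMaybeAlign: judging from the removed code, the table stores log2(alignment)+1, with 0 meaning "no alignment requirement". A sketch of the round-trip, assuming llvm/Support/Alignment.h (demo name is illustrative):

    #include "llvm/Support/Alignment.h"
    #include <cassert>

    void decodeDemo() {
      // 0 decodes to an unset MaybeAlign: no minimum alignment to enforce.
      assert(!llvm::decodeMaybeAlign(0));
      // Nonzero N decodes to Align(1 << (N - 1)), e.g. 5 -> 16 bytes,
      // matching the old "MinAlign ? 1 << (MinAlign - 1) : 0".
      assert(*llvm::decodeMaybeAlign(5) == llvm::Align(16));
    }
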