diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1525,13 +1525,14 @@
   }
 
   Lo = DAG.getLoad(ISD::UNINDEXED, ExtType, LoVT, dl, Ch, Ptr, Offset,
-                   LD->getPointerInfo(), LoMemVT, Alignment, MMOFlags, AAInfo);
+                   LD->getPointerInfo(), LoMemVT, Alignment.value(), MMOFlags,
+                   AAInfo);
 
   unsigned IncrementSize = LoMemVT.getSizeInBits()/8;
   Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize);
   Hi = DAG.getLoad(ISD::UNINDEXED, ExtType, HiVT, dl, Ch, Ptr, Offset,
                    LD->getPointerInfo().getWithOffset(IncrementSize), HiMemVT,
-                   Alignment, MMOFlags, AAInfo);
+                   Alignment.value(), MMOFlags, AAInfo);
 
   // Build a factor node to remember that this load is independent of the
   // other one.
@@ -1650,7 +1651,7 @@
 
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MGT->getPointerInfo(), MachineMemOperand::MOLoad,
-      MemoryLocation::UnknownSize, Alignment, MGT->getAAInfo(),
+      MemoryLocation::UnknownSize, Alignment.value(), MGT->getAAInfo(),
       MGT->getRanges());
 
   SDValue OpsLo[] = {Ch, PassThruLo, MaskLo, Ptr, IndexLo, Scale};
@@ -2409,7 +2410,8 @@
   SDValue Lo;
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       N->getPointerInfo(), MachineMemOperand::MOStore,
-      MemoryLocation::UnknownSize, Alignment, N->getAAInfo(), N->getRanges());
+      MemoryLocation::UnknownSize, Alignment.value(), N->getAAInfo(),
+      N->getRanges());
 
   SDValue OpsLo[] = {Ch, DataLo, MaskLo, Ptr, IndexLo, Scale};
   Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataLo.getValueType(),
@@ -2449,10 +2451,10 @@
 
   if (isTruncating)
     Lo = DAG.getTruncStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), LoMemVT,
-                           Alignment, MMOFlags, AAInfo);
+                           Alignment.value(), MMOFlags, AAInfo);
   else
-    Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment, MMOFlags,
-                      AAInfo);
+    Lo = DAG.getStore(Ch, DL, Lo, Ptr, N->getPointerInfo(), Alignment.value(),
+                      MMOFlags, AAInfo);
 
   // Increment the pointer to the other half.
   Ptr = DAG.getObjectPtrOffset(DL, Ptr, IncrementSize);
@@ -2460,11 +2462,11 @@
   if (isTruncating)
     Hi = DAG.getTruncStore(Ch, DL, Hi, Ptr,
                            N->getPointerInfo().getWithOffset(IncrementSize),
-                           HiMemVT, Alignment, MMOFlags, AAInfo);
+                           HiMemVT, Alignment.value(), MMOFlags, AAInfo);
   else
     Hi = DAG.getStore(Ch, DL, Hi, Ptr,
                       N->getPointerInfo().getWithOffset(IncrementSize),
-                      Alignment, MMOFlags, AAInfo);
+                      Alignment.value(), MMOFlags, AAInfo);
 
   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1464,12 +1464,10 @@
   // TODO: can we be smarter about machine pointer info?
   MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
 
   MachineMemOperand *MMO = MF.getMachineMemOperand(
-    PtrInfo,
-    MachineMemOperand::MOLoad |
-    MachineMemOperand::MODereferenceable |
-    MachineMemOperand::MOInvariant,
-    4,
-    MinAlign(64, StructOffset));
+      PtrInfo,
+      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+          MachineMemOperand::MOInvariant,
+      4, commonAlignment(Align(64), StructOffset));
 
   Register LoadAddr;
@@ -2028,10 +2026,10 @@
   Register GOTAddr = MRI.createGenericVirtualRegister(PtrTy);
 
   MachineMemOperand *GOTMMO = MF.getMachineMemOperand(
-    MachinePointerInfo::getGOT(MF),
-    MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
-    MachineMemOperand::MOInvariant,
-    8 /*Size*/, 8 /*Align*/);
+      MachinePointerInfo::getGOT(MF),
+      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+          MachineMemOperand::MOInvariant,
+      8 /*Size*/, Align(8));
 
   buildPCRelGlobalAddress(GOTAddr, PtrTy, B, GV, 0, SIInstrInfo::MO_GOTPCREL32);
@@ -4003,11 +4001,12 @@
   // FIXME: When intrinsic definition is fixed, this should have an MMO already.
   // TODO: Should this use datalayout alignment?
   const unsigned MemSize = (Size + 7) / 8;
-  const unsigned MemAlign = 4;
+  const Align MemAlign(4);
   MachineMemOperand *MMO = MF.getMachineMemOperand(
-    MachinePointerInfo(),
-    MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
-    MachineMemOperand::MOInvariant, MemSize, MemAlign);
+      MachinePointerInfo(),
+      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
+          MachineMemOperand::MOInvariant,
+      MemSize, MemAlign);
   MI.addMemOperand(MF, MMO);
 
   // There are no 96-bit result scalar loads, but widening to 128-bit should
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -1352,7 +1352,7 @@
 
   // Use the alignment to ensure that the required offsets will fit into the
   // immediate offsets.
-  const unsigned Align = NumLoads > 1 ? 16 * NumLoads : 1;
+  const unsigned Alignment = NumLoads > 1 ? 16 * NumLoads : 1;
 
   MachineIRBuilder B(MI);
   MachineFunction &MF = B.getMF();
@@ -1362,12 +1362,12 @@
   int64_t ImmOffset = 0;
 
   unsigned MMOOffset = setBufferOffsets(B, *this, MI.getOperand(2).getReg(),
-                                        VOffset, SOffset, ImmOffset, Align);
+                                        VOffset, SOffset, ImmOffset, Alignment);
 
   // TODO: 96-bit loads were widened to 128-bit results. Shrink the result if we
   // can, but we neeed to track an MMO for that.
   const unsigned MemSize = (Ty.getSizeInBits() + 7) / 8;
-  const unsigned MemAlign = 4; // FIXME: ABI type alignment?
+  const Align MemAlign(4); // FIXME: ABI type alignment?
   MachineMemOperand *BaseMMO = MF.getMachineMemOperand(
     MachinePointerInfo(),
     MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -94,7 +94,7 @@
   MachineMemOperand *MMO = MF->getMachineMemOperand(
       MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore, 4,
-      MFI.getObjectAlignment(FI));
+      MFI.getObjectAlign(FI));
 
   if (isUInt<12>(Offset)) {
     BuildMI(MBB, I, DebugLoc(), TII->get(AMDGPU::BUFFER_STORE_DWORD_OFFSET))
@@ -141,7 +141,7 @@
   MachineMemOperand *MMO = MF->getMachineMemOperand(
       MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad, 4,
-      MFI.getObjectAlignment(FI));
+      MFI.getObjectAlign(FI));
 
   if (isUInt<12>(Offset)) {
     BuildMI(MBB, I, DebugLoc(),
@@ -462,9 +462,9 @@
     const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
     auto MMO = MF.getMachineMemOperand(PtrInfo,
                                        MachineMemOperand::MOLoad |
-                                       MachineMemOperand::MOInvariant |
-                                       MachineMemOperand::MODereferenceable,
-                                       16, 4);
+                                           MachineMemOperand::MOInvariant |
+                                           MachineMemOperand::MODereferenceable,
+                                       16, Align(4));
     unsigned Offset = Fn.getCallingConv() == CallingConv::AMDGPU_CS ? 16 : 0;
     const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
     unsigned EncodedOffset = AMDGPU::convertSMRDOffsetUnits(Subtarget, Offset);
@@ -499,11 +499,11 @@
       const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
 
       MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
-      auto MMO = MF.getMachineMemOperand(PtrInfo,
-                                         MachineMemOperand::MOLoad |
-                                         MachineMemOperand::MOInvariant |
-                                         MachineMemOperand::MODereferenceable,
-                                         8, 4);
+      auto MMO = MF.getMachineMemOperand(
+          PtrInfo,
+          MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
+              MachineMemOperand::MODereferenceable,
+          8, Align(4));
       BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
         .addReg(MFI->getImplicitBufferPtrUserSGPR())
         .addImm(0) // offset
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5668,14 +5668,14 @@
   MachineFunction &MF = DAG.getMachineFunction();
   const DataLayout &DataLayout = DAG.getDataLayout();
 
-  unsigned Align =
-      DataLayout.getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
+  Align Alignment =
+      DataLayout.getABITypeAlign(VT.getTypeForEVT(*DAG.getContext()));
 
   MachineMemOperand *MMO = MF.getMachineMemOperand(
       MachinePointerInfo(),
       MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant,
-      VT.getStoreSize(), Align);
+      VT.getStoreSize(), Alignment);
 
   if (!Offset->isDivergent()) {
     SDValue Ops[] = {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1132,13 +1132,11 @@
   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
   const DebugLoc &DL = MBB.findDebugLoc(MI);
 
-  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
-  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
   MachinePointerInfo PtrInfo
     = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
-  MachineMemOperand *MMO
-    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
-                               Size, Align);
+  MachineMemOperand *MMO = MF->getMachineMemOperand(
+      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
+      FrameInfo.getObjectAlign(FrameIndex));
   unsigned SpillSize = TRI->getSpillSize(*RC);
 
   if (RI.isSGPRClass(RC)) {
@@ -1260,15 +1258,14 @@
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
   MachineFrameInfo &FrameInfo = MF->getFrameInfo();
   const DebugLoc &DL = MBB.findDebugLoc(MI);
-  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
-  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
   unsigned SpillSize = TRI->getSpillSize(*RC);
 
   MachinePointerInfo PtrInfo
     = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
 
   MachineMemOperand *MMO = MF->getMachineMemOperand(
-      PtrInfo, MachineMemOperand::MOLoad, Size, Align);
+      PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
+      FrameInfo.getObjectAlign(FrameIndex));
 
   if (RI.isSGPRClass(RC)) {
     MFI->setHasSpilledSGPRs();
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -673,7 +673,7 @@
   int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
   int64_t ScratchOffsetRegDelta = 0;
 
-  unsigned Align = MFI.getObjectAlignment(Index);
+  Align Alignment = MFI.getObjectAlign(Index);
   const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();
 
   Register TmpReg =
@@ -749,9 +749,9 @@
     }
 
     MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);
-    MachineMemOperand *NewMMO
-      = MF->getMachineMemOperand(PInfo, MMO->getFlags(),
-                                 EltSize, MinAlign(Align, EltSize * i));
+    MachineMemOperand *NewMMO =
+        MF->getMachineMemOperand(PInfo, MMO->getFlags(), EltSize,
+                                 commonAlignment(Alignment, EltSize * i));
 
     MIB = BuildMI(*MBB, MI, DL, Desc)
               .addReg(SubReg,
@@ -877,12 +877,12 @@
         Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
     }
 
-    unsigned Align = FrameInfo.getObjectAlignment(Index);
+    Align Alignment = FrameInfo.getObjectAlign(Index);
     MachinePointerInfo PtrInfo
       = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
-    MachineMemOperand *MMO
-      = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
-                                 EltSize, MinAlign(Align, EltSize * i));
+    MachineMemOperand *MMO =
+        MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, EltSize,
+                                 commonAlignment(Alignment, EltSize * i));
     BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
       .addReg(TmpVGPR, RegState::Kill)      // src
       .addFrameIndex(Index)                 // vaddr
@@ -951,14 +951,14 @@
       // FIXME: We should use S_LOAD_DWORD here for VI.
       if (!TmpVGPR.isValid())
         TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
 
-      unsigned Align = FrameInfo.getObjectAlignment(Index);
+      Align Alignment = FrameInfo.getObjectAlign(Index);
       MachinePointerInfo PtrInfo
         = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
-      MachineMemOperand *MMO = MF->getMachineMemOperand(PtrInfo,
-        MachineMemOperand::MOLoad, EltSize,
-        MinAlign(Align, EltSize * i));
+      MachineMemOperand *MMO =
+          MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad, EltSize,
+                                   commonAlignment(Alignment, EltSize * i));
 
       BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpVGPR)
         .addFrameIndex(Index)                 // vaddr
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1076,11 +1076,11 @@
                     const TargetRegisterInfo *TRI) const {
   MachineFunction &MF = *MBB.getParent();
   MachineFrameInfo &MFI = MF.getFrameInfo();
-  unsigned Align = MFI.getObjectAlignment(FI);
+  Align Alignment = MFI.getObjectAlign(FI);
 
   MachineMemOperand *MMO = MF.getMachineMemOperand(
       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
-      MFI.getObjectSize(FI), Align);
+      MFI.getObjectSize(FI), Alignment);
 
   switch (TRI->getSpillSize(*RC)) {
     case 2:
@@ -1150,7 +1150,7 @@
     case 16:
       if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
         // Use aligned spills if the stack can be realigned.
-        if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
+        if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF)) {
           BuildMI(MBB, I, DebugLoc(), get(ARM::VST1q64))
               .addFrameIndex(FI)
              .addImm(16)
@@ -1178,7 +1178,7 @@
     case 24:
       if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
         // Use aligned spills if the stack can be realigned.
-        if (Align >= 16 && getRegisterInfo().canRealignStack(MF) &&
+        if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
             Subtarget.hasNEON()) {
           BuildMI(MBB, I, DebugLoc(), get(ARM::VST1d64TPseudo))
               .addFrameIndex(FI)
@@ -1201,7 +1201,7 @@
       break;
    case 32:
      if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) {
-        if (Align >= 16 && getRegisterInfo().canRealignStack(MF) &&
+        if (Alignment >= 16 && getRegisterInfo().canRealignStack(MF) &&
            Subtarget.hasNEON()) {
          // FIXME: It's possible to only store part of the QQ register if the
          // spilled def has a sub-register index.
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -3400,7 +3400,7 @@
     MachineFunction& MF = CurDAG->getMachineFunction();
     MachineMemOperand *MemOp =
        MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
-                               MachineMemOperand::MOLoad, 4, 4);
+                               MachineMemOperand::MOLoad, 4, Align(4));
 
     CurDAG->setNodeMemRefs(cast<MachineSDNode>(ResNode), {MemOp});
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -9578,11 +9578,11 @@
   // Grab constant pool and fixed stack memory operands.
   MachineMemOperand *CPMMO =
       MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
-                               MachineMemOperand::MOLoad, 4, 4);
+                               MachineMemOperand::MOLoad, 4, Align(4));
 
   MachineMemOperand *FIMMOSt =
       MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
-                               MachineMemOperand::MOStore, 4, 4);
+                               MachineMemOperand::MOStore, 4, Align(4));
 
   // Load the address of the dispatch MBB into the jump buffer.
   if (isThumb2) {
@@ -10233,7 +10233,7 @@
   Register dest = MI.getOperand(0).getReg();
   Register src = MI.getOperand(1).getReg();
   unsigned SizeVal = MI.getOperand(2).getImm();
-  unsigned Align = MI.getOperand(3).getImm();
+  unsigned Alignment = MI.getOperand(3).getImm();
   DebugLoc dl = MI.getDebugLoc();
 
   MachineFunction *MF = BB->getParent();
@@ -10246,17 +10246,17 @@
   bool IsThumb2 = Subtarget->isThumb2();
   bool IsThumb = Subtarget->isThumb();
 
-  if (Align & 1) {
+  if (Alignment & 1) {
     UnitSize = 1;
-  } else if (Align & 2) {
+  } else if (Alignment & 2) {
     UnitSize = 2;
   } else {
     // Check whether we can use NEON instructions.
     if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
         Subtarget->hasNEON()) {
-      if ((Align % 16 == 0) && SizeVal >= 16)
+      if ((Alignment % 16 == 0) && SizeVal >= 16)
         UnitSize = 16;
-      else if ((Align % 8 == 0) && SizeVal >= 8)
+      else if ((Alignment % 8 == 0) && SizeVal >= 8)
         UnitSize = 8;
     }
     // Can't use NEON instructions.
@@ -10362,13 +10362,11 @@
     const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
 
     // MachineConstantPool wants an explicit alignment.
-    unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
-    if (Align == 0)
-      Align = MF->getDataLayout().getTypeAllocSize(C->getType());
-    unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
+    Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
+    unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment.value());
     MachineMemOperand *CPMMO =
         MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
-                                 MachineMemOperand::MOLoad, 4, 4);
+                                 MachineMemOperand::MOLoad, 4, Align(4));
 
     if (IsThumb)
       BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.cpp b/llvm/lib/Target/AVR/AVRInstrInfo.cpp
--- a/llvm/lib/Target/AVR/AVRInstrInfo.cpp
+++ b/llvm/lib/Target/AVR/AVRInstrInfo.cpp
@@ -138,7 +138,7 @@
   MachineMemOperand *MMO = MF.getMachineMemOperand(
       MachinePointerInfo::getFixedStack(MF, FrameIndex),
      MachineMemOperand::MOStore, MFI.getObjectSize(FrameIndex),
-      MFI.getObjectAlignment(FrameIndex));
+      MFI.getObjectAlign(FrameIndex));
 
   unsigned Opcode = 0;
   if (TRI->isTypeLegalForClass(*RC, MVT::i8)) {
@@ -172,7 +172,7 @@
   MachineMemOperand *MMO = MF.getMachineMemOperand(
       MachinePointerInfo::getFixedStack(MF, FrameIndex),
      MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
-      MFI.getObjectAlignment(FrameIndex));
+      MFI.getObjectAlign(FrameIndex));
 
   unsigned Opcode = 0;
   if (TRI->isTypeLegalForClass(*RC, MVT::i8)) {
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -893,7 +893,7 @@
   // Create a dummy memory operand to avoid allocframe from being treated as
   // a volatile memory reference.
   auto *MMO = MF.getMachineMemOperand(MachinePointerInfo::getStack(MF, 0),
-                                      MachineMemOperand::MOStore, 4, 4);
+                                      MachineMemOperand::MOStore, 4, Align(4));
   DebugLoc dl = MBB.findDebugLoc(InsertPt);
   unsigned SP = HRI.getStackRegister();
@@ -1547,12 +1547,11 @@
         if (auto *FS = dyn_cast_or_null<FixedStackPseudoSourceValue>(PV)) {
           int FI = FS->getFrameIndex();
           if (DealignSlots.count(FI)) {
-            unsigned A = MFI.getObjectAlignment(FI);
-            auto *NewMMO = MF.getMachineMemOperand(MMO->getPointerInfo(),
-                              MMO->getFlags(), MMO->getSize(), A,
-                              MMO->getAAInfo(), MMO->getRanges(),
-                              MMO->getSyncScopeID(), MMO->getOrdering(),
-                              MMO->getFailureOrdering());
+            auto *NewMMO = MF.getMachineMemOperand(
+                MMO->getPointerInfo(), MMO->getFlags(), MMO->getSize(),
+                MFI.getObjectAlign(FI), MMO->getAAInfo(), MMO->getRanges(),
+                MMO->getSyncScopeID(), MMO->getOrdering(),
+                MMO->getFailureOrdering());
             new_memops.push_back(NewMMO);
             KeepOld = false;
             continue;
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2909,10 +2909,10 @@
   MachineMemOperand *WideMMO = nullptr;
   if (MachineMemOperand *MMO = LN->getMemOperand()) {
     MachineFunction &MF = DAG.getMachineFunction();
-    WideMMO = MF.getMachineMemOperand(MMO->getPointerInfo(), MMO->getFlags(),
-                    2*LoadLen, LoadLen, MMO->getAAInfo(), MMO->getRanges(),
-                    MMO->getSyncScopeID(), MMO->getOrdering(),
-                    MMO->getFailureOrdering());
+    WideMMO = MF.getMachineMemOperand(
+        MMO->getPointerInfo(), MMO->getFlags(), 2 * LoadLen, Align(LoadLen),
+        MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
+        MMO->getOrdering(), MMO->getFailureOrdering());
   }
 
   SDValue Load0 = DAG.getLoad(LoadTy, dl, Chain, Base0, WideMMO);
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -916,12 +916,11 @@
   DebugLoc DL = MBB.findDebugLoc(I);
   MachineFunction &MF = *MBB.getParent();
   MachineFrameInfo &MFI = MF.getFrameInfo();
-  unsigned SlotAlign = MFI.getObjectAlignment(FI);
   unsigned KillFlag = getKillRegState(isKill);
 
   MachineMemOperand *MMO = MF.getMachineMemOperand(
       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
-      MFI.getObjectSize(FI), SlotAlign);
+      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
 
   if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
     BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
@@ -963,11 +962,10 @@
   DebugLoc DL = MBB.findDebugLoc(I);
   MachineFunction &MF = *MBB.getParent();
   MachineFrameInfo &MFI = MF.getFrameInfo();
-  unsigned SlotAlign = MFI.getObjectAlignment(FI);
 
   MachineMemOperand *MMO = MF.getMachineMemOperand(
       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
-      MFI.getObjectSize(FI), SlotAlign);
+      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
 
   if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
     BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
@@ -1373,7 +1371,8 @@
     static const CrashPseudoSourceValue CrashPSV(*this);
     MachineMemOperand *MMO = MF.getMachineMemOperand(
         MachinePointerInfo(&CrashPSV),
-        MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 8, 1);
+        MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 8,
+        Align(1));
     BuildMI(MBB, MI, DL, get(Hexagon::PS_loadrdabs), Hexagon::D13)
       .addImm(0xBADC0FEE)  // Misaligned load.
       .addMemOperand(MMO);
diff --git a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
--- a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -46,7 +46,7 @@
   MachineMemOperand *MMO = MF.getMachineMemOperand(
       MachinePointerInfo::getFixedStack(MF, FrameIdx),
      MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
-      MFI.getObjectAlignment(FrameIdx));
+      MFI.getObjectAlign(FrameIdx));
 
   if (RC == &MSP430::GR16RegClass)
     BuildMI(MBB, MI, DL, get(MSP430::MOV16mr))
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -271,8 +271,9 @@
   MachinePointerInfo MPO =
       MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
   unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
-  unsigned Align = MinAlign(TFL->getStackAlignment(), Offset);
-  MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Align);
+  Align Alignment = commonAlignment(TFL->getStackAlign(), Offset);
+  MMO =
+      MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size, Alignment);
 
   return AddrReg.getReg(0);
 }
@@ -485,9 +486,8 @@
       MachinePointerInfo MPO = MachinePointerInfo::getFixedStack(MF, FI);
       MachineInstrBuilder FrameIndex =
           MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI);
-      MachineMemOperand *MMO =
-          MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, RegSize,
-                                  /* Alignment */ RegSize);
+      MachineMemOperand *MMO = MF.getMachineMemOperand(
+          MPO, MachineMemOperand::MOStore, RegSize, Align(RegSize));
       MIRBuilder.buildStore(Copy, FrameIndex, *MMO);
     }
   }
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -795,12 +795,11 @@
   }
   if (Addr.isFIBase()) {
     unsigned FI = Addr.getFI();
-    unsigned Align = 4;
     int64_t Offset = Addr.getOffset();
     MachineFrameInfo &MFI = MF->getFrameInfo();
     MachineMemOperand *MMO = MF->getMachineMemOperand(
         MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
-        MFI.getObjectSize(FI), Align);
+        MFI.getObjectSize(FI), Align(4));
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
         .addFrameIndex(FI)
         .addImm(Offset)
@@ -846,12 +845,11 @@
   }
   if (Addr.isFIBase()) {
     unsigned FI = Addr.getFI();
-    unsigned Align = 4;
     int64_t Offset = Addr.getOffset();
     MachineFrameInfo &MFI = MF->getFrameInfo();
     MachineMemOperand *MMO = MF->getMachineMemOperand(
         MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
-        MFI.getObjectSize(FI), Align);
+        MFI.getObjectSize(FI), Align(4));
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
         .addReg(SrcReg)
         .addFrameIndex(FI)
@@ -1263,7 +1261,7 @@
       Addr.setReg(Mips::SP);
       Addr.setOffset(VA.getLocMemOffset() + BEAlign);
 
-      unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
+      Align Alignment = DL.getABITypeAlign(ArgVal->getType());
       MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
           MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
--- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -66,10 +66,10 @@
                             MachineMemOperand::Flags Flags) const {
   MachineFunction &MF = *MBB.getParent();
   MachineFrameInfo &MFI = MF.getFrameInfo();
-  unsigned Align = MFI.getObjectAlignment(FI);
 
   return MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
-                                 Flags, MFI.getObjectSize(FI), Align);
+                                 Flags, MFI.getObjectSize(FI),
+                                 MFI.getObjectAlign(FI));
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
--- a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -393,7 +393,7 @@
             .addUse(DestAddress)
             .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
             .addMemOperand(MF.getMachineMemOperand(
-                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, 4));
+                MachinePointerInfo(), MachineMemOperand::MOLoad, 4, Align(4)));
     if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
       return false;
@@ -681,7 +681,7 @@
       LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
       LWGOT->addMemOperand(
           MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
-                                      MachineMemOperand::MOLoad, 4, 4));
+                                      MachineMemOperand::MOLoad, 4, Align(4)));
       if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
         return false;
@@ -727,9 +727,9 @@
                .addReg(MF.getInfo<MipsFunctionInfo>()
                            ->getGlobalBaseRegForGlobalISel())
                .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
-               .addMemOperand(
-                   MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
-                                           MachineMemOperand::MOLoad, 4, 4));
+               .addMemOperand(MF.getMachineMemOperand(
+                   MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad, 4,
+                   Align(4)));
     } else {
       MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -525,12 +525,13 @@
   }
   case Intrinsic::vacopy: {
     MachinePointerInfo MPO;
-    auto Tmp = MIRBuilder.buildLoad(LLT::pointer(0, 32), MI.getOperand(2),
-                                    *MI.getMF()->getMachineMemOperand(
-                                        MPO, MachineMemOperand::MOLoad, 4, 4));
+    auto Tmp =
+        MIRBuilder.buildLoad(LLT::pointer(0, 32), MI.getOperand(2),
+                             *MI.getMF()->getMachineMemOperand(
+                                 MPO, MachineMemOperand::MOLoad, 4, Align(4)));
     MIRBuilder.buildStore(Tmp, MI.getOperand(1),
                           *MI.getMF()->getMachineMemOperand(
-                              MPO, MachineMemOperand::MOStore, 4, 4));
+                              MPO, MachineMemOperand::MOStore, 4, Align(4)));
     MI.eraseFromParent();
     return true;
   }
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -536,7 +536,7 @@
         MachinePointerInfo::getFixedStack(*FuncInfo.MF, Addr.Base.FI,
                                           Addr.Offset),
         MachineMemOperand::MOLoad, MFI.getObjectSize(Addr.Base.FI),
-        MFI.getObjectAlignment(Addr.Base.FI));
+        MFI.getObjectAlign(Addr.Base.FI));
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
       .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO);
@@ -682,7 +682,7 @@
         MachinePointerInfo::getFixedStack(*FuncInfo.MF, Addr.Base.FI,
                                           Addr.Offset),
         MachineMemOperand::MOStore, MFI.getObjectSize(Addr.Base.FI),
-        MFI.getObjectAlignment(Addr.Base.FI));
+        MFI.getObjectAlign(Addr.Base.FI));
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
       .addReg(SrcReg)
@@ -1996,9 +1996,9 @@
     return 0;
 
   // All FP constants are loaded from the constant pool.
-  unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
-  assert(Align > 0 && "Unexpectedly missing alignment information!");
-  unsigned Idx = MCP.getConstantPoolIndex(cast<ConstantFP>(CFP), Align);
+  Align Alignment = DL.getPrefTypeAlign(CFP->getType());
+  unsigned Idx =
+      MCP.getConstantPoolIndex(cast<ConstantFP>(CFP), Alignment.value());
   const bool HasSPE = PPCSubTarget->hasSPE();
   const TargetRegisterClass *RC;
   if (HasSPE)
@@ -2011,7 +2011,7 @@
 
   MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
       MachinePointerInfo::getConstantPool(*FuncInfo.MF),
-      MachineMemOperand::MOLoad, (VT == MVT::f32) ? 4 : 8, Align);
+      MachineMemOperand::MOLoad, (VT == MVT::f32) ? 4 : 8, Alignment);
 
   unsigned Opc;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -991,7 +991,7 @@
     MachinePointerInfo MPI;
     bool IsDereferenceable = false;
     bool IsInvariant = false;
-    unsigned Alignment = 0;
+    Align Alignment;
     AAMDNodes AAInfo;
     const MDNode *Ranges = nullptr;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -8054,7 +8054,7 @@
   RLI.MPI = LD->getPointerInfo();
   RLI.IsDereferenceable = LD->isDereferenceable();
   RLI.IsInvariant = LD->isInvariant();
-  RLI.Alignment = LD->getAlignment();
+  RLI.Alignment = LD->getAlign();
   RLI.AAInfo = LD->getAAInfo();
   RLI.Ranges = LD->getRanges();
@@ -8380,7 +8380,7 @@
     RLI.Chain = Store;
     RLI.MPI =
         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
-    RLI.Alignment = 4;
+    RLI.Alignment = Align(4);
 
     MachineMemOperand *MMO =
       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
@@ -8432,7 +8432,7 @@
     RLI.Chain = Store;
     RLI.MPI =
         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
-    RLI.Alignment = 4;
+    RLI.Alignment = Align(4);
   }
 
   MachineMemOperand *MMO =
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -1238,7 +1238,7 @@
   MachineMemOperand *MMO = MF.getMachineMemOperand(
       MachinePointerInfo::getFixedStack(MF, FrameIdx),
      MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
-      MFI.getObjectAlignment(FrameIdx));
+      MFI.getObjectAlign(FrameIdx));
   NewMIs.back()->addMemOperand(MF, MMO);
 }
@@ -1301,7 +1301,7 @@
   MachineMemOperand *MMO = MF.getMachineMemOperand(
       MachinePointerInfo::getFixedStack(MF, FrameIdx),
      MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
-      MFI.getObjectAlignment(FrameIdx));
+      MFI.getObjectAlign(FrameIdx));
   NewMIs.back()->addMemOperand(MF, MMO);
 }
diff --git a/llvm/test/TableGen/address-space-patfrags.td b/llvm/test/TableGen/address-space-patfrags.td
--- a/llvm/test/TableGen/address-space-patfrags.td
+++ b/llvm/test/TableGen/address-space-patfrags.td
@@ -81,7 +81,7 @@
 // SDAG-NEXT: if (AddrSpace != 999)
 // SDAG-NEXT: return false;
-// SDAG-NEXT: if (cast<MemSDNode>(N)->getAlignment() < 2)
+// SDAG-NEXT: if (cast<MemSDNode>(N)->getAlign() < Align(2))
 // SDAG-NEXT: return false;
 // SDAG-NEXT: if (cast<MemSDNode>(N)->getMemoryVT() != MVT::i32) return false;
 // SDAG-NEXT: return true;
diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
--- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -999,9 +999,9 @@
   int64_t MinAlign = getMinAlignment();
   if (MinAlign > 0) {
-    Code += "if (cast<MemSDNode>(N)->getAlignment() < ";
+    Code += "if (cast<MemSDNode>(N)->getAlign() < Align(";
     Code += utostr(MinAlign);
-    Code += ")\nreturn false;\n";
+    Code += "))\nreturn false;\n";
   }
 
   Record *MemoryVT = getMemoryVT();