diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -429,7 +429,7 @@
                  dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
                         << "(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
                         << ")->getAlignment() >= " << MinAlign << ")\n");
-      if (MMO->getAlignment() < MinAlign && handleReject() == RejectAndGiveUp)
+      if (MMO->getAlign() < MinAlign && handleReject() == RejectAndGiveUp)
         return false;
 
       break;
diff --git a/llvm/include/llvm/CodeGen/MachineMemOperand.h b/llvm/include/llvm/CodeGen/MachineMemOperand.h
--- a/llvm/include/llvm/CodeGen/MachineMemOperand.h
+++ b/llvm/include/llvm/CodeGen/MachineMemOperand.h
@@ -225,11 +225,25 @@
 
   /// Return the minimum known alignment in bytes of the actual memory
   /// reference.
-  uint64_t getAlignment() const;
+  /// FIXME: Remove once transition to Align is over.
+  LLVM_ATTRIBUTE_DEPRECATED(uint64_t getAlignment() const,
+                            "Use getAlign instead");
+
+  /// Return the minimum known alignment in bytes of the actual memory
+  /// reference.
+  Align getAlign() const;
+
+  /// Return the minimum known alignment in bytes of the base address, without
+  /// the offset.
+  /// FIXME: Remove once transition to Align is over.
+  LLVM_ATTRIBUTE_DEPRECATED(uint64_t getBaseAlignment() const,
+                            "Use getBaseAlign instead") {
+    return BaseAlign.value();
+  }
 
   /// Return the minimum known alignment in bytes of the base address, without
   /// the offset.
-  uint64_t getBaseAlignment() const { return BaseAlign.value(); }
+  Align getBaseAlign() const { return BaseAlign; }
 
   /// Return the AA tags for the memory reference.
   AAMDNodes getAAInfo() const { return AAInfo; }
@@ -307,7 +321,7 @@
            LHS.getFlags() == RHS.getFlags() &&
            LHS.getAAInfo() == RHS.getAAInfo() &&
            LHS.getRanges() == RHS.getRanges() &&
-           LHS.getAlignment() == RHS.getAlignment() &&
+           LHS.getAlign() == RHS.getAlign() &&
            LHS.getAddrSpace() == RHS.getAddrSpace();
   }
 
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1292,12 +1292,8 @@
   bool writeMem() const { return MMO->isStore(); }
 
   /// Returns alignment and volatility of the memory access
-  unsigned getOriginalAlignment() const {
-    return MMO->getBaseAlignment();
-  }
-  unsigned getAlignment() const {
-    return MMO->getAlignment();
-  }
+  unsigned getOriginalAlignment() const { return MMO->getBaseAlign().value(); }
+  unsigned getAlignment() const { return MMO->getAlign().value(); }
 
   /// Return the SubclassData value, without HasDebugValue. This contains an
   /// encoding of the volatile flag, as well as bits used by subclasses.
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -1268,7 +1268,7 @@
   if (IsVolatile)
     return false;
 
-  Align DstAlign(MemOp->getBaseAlignment());
+  Align DstAlign = MemOp->getBaseAlign();
   Align SrcAlign;
   Register Dst = MI.getOperand(1).getReg();
   Register Src = MI.getOperand(2).getReg();
@@ -1277,7 +1277,7 @@
   if (ID != Intrinsic::memset) {
     assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
     MemOp = *(++MMOIt);
-    SrcAlign = Align(MemOp->getBaseAlignment());
+    SrcAlign = MemOp->getBaseAlign();
   }
 
   // See if this is a constant length copy
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
@@ -506,8 +506,7 @@
   SmallVector<LegalityQuery::MemDesc, 2> MemDescrs;
   for (const auto &MMO : MI.memoperands())
     MemDescrs.push_back({8 * MMO->getSize() /* in bits */,
-                         8 * MMO->getAlignment(),
-                         MMO->getOrdering()});
+                         8 * MMO->getAlign().value(), MMO->getOrdering()});
 
   return getAction({MI.getOpcode(), Types, MemDescrs});
 }
diff --git a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
--- a/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
+++ b/llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
@@ -112,7 +112,7 @@
       MIOperands.push_back((unsigned)Op->getOrdering());
       MIOperands.push_back((unsigned)Op->getAddrSpace());
       MIOperands.push_back((unsigned)Op->getSyncScopeID());
-      MIOperands.push_back((unsigned)Op->getBaseAlignment());
+      MIOperands.push_back((unsigned)Op->getBaseAlign().value());
       MIOperands.push_back((unsigned)Op->getFailureOrdering());
     }
 
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -486,14 +486,14 @@
 
   // If there is no pointer value, the offset isn't tracked so we need to adjust
   // the base alignment.
-  unsigned Align = PtrInfo.V.isNull()
-                       ? MinAlign(MMO->getBaseAlignment(), Offset)
-                       : MMO->getBaseAlignment();
+  Align Alignment = PtrInfo.V.isNull()
+                        ? commonAlignment(MMO->getBaseAlign(), Offset)
+                        : MMO->getBaseAlign();
 
-  return new (Allocator)
-      MachineMemOperand(PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size,
-                        Align, AAMDNodes(), nullptr, MMO->getSyncScopeID(),
-                        MMO->getOrdering(), MMO->getFailureOrdering());
+  return new (Allocator) MachineMemOperand(
+      PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size, Alignment.value(),
+      AAMDNodes(), nullptr, MMO->getSyncScopeID(), MMO->getOrdering(),
+      MMO->getFailureOrdering());
 }
 
 MachineMemOperand *
@@ -503,18 +503,17 @@
                  MachinePointerInfo(MMO->getValue(), MMO->getOffset())
                : MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());
 
-  return new (Allocator)
-      MachineMemOperand(MPI, MMO->getFlags(), MMO->getSize(),
-                        MMO->getBaseAlignment(), AAInfo,
-                        MMO->getRanges(), MMO->getSyncScopeID(),
-                        MMO->getOrdering(), MMO->getFailureOrdering());
+  return new (Allocator) MachineMemOperand(
+      MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign().value(), AAInfo,
+      MMO->getRanges(), MMO->getSyncScopeID(), MMO->getOrdering(),
+      MMO->getFailureOrdering());
 }
 
 MachineMemOperand *
 MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                       MachineMemOperand::Flags Flags) {
   return new (Allocator) MachineMemOperand(
-      MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlignment(),
+      MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign().value(),
       MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
       MMO->getOrdering(), MMO->getFailureOrdering());
 }
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1014,7 +1014,6 @@
   assert((PtrInfo.V.isNull() || PtrInfo.V.is<const PseudoSourceValue *>() ||
          isa<PointerType>(PtrInfo.V.get<const Value *>()->getType())) &&
          "invalid pointer value");
-  assert(getBaseAlignment() == a && a != 0 && "Alignment is not a power of 2!");
   assert((isLoad() || isStore()) && "Not a load/store!");
 
   AtomicInfo.SSID = static_cast<unsigned>(SSID);
@@ -1032,7 +1031,7 @@
   ID.AddInteger(Size);
   ID.AddPointer(getOpaqueValue());
   ID.AddInteger(getFlags());
-  ID.AddInteger(getBaseAlignment());
+  ID.AddInteger(getBaseAlign().value());
 }
 
 void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
@@ -1041,9 +1040,9 @@
   assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
   assert(MMO->getSize() == getSize() && "Size mismatch!");
 
-  if (MMO->getBaseAlignment() >= getBaseAlignment()) {
+  if (MMO->getBaseAlign() >= getBaseAlign()) {
     // Update the alignment value.
-    BaseAlign = Align(MMO->getBaseAlignment());
+    BaseAlign = MMO->getBaseAlign();
     // Also update the base and offset, because the new alignment may
     // not be applicable with the old ones.
     PtrInfo = MMO->PtrInfo;
@@ -1052,8 +1051,12 @@
 
 /// getAlignment - Return the minimum known alignment in bytes of the
 /// actual memory reference.
-uint64_t MachineMemOperand::getAlignment() const {
-  return MinAlign(getBaseAlignment(), getOffset());
+uint64_t MachineMemOperand::getAlignment() const { return getAlign().value(); }
+
+/// getAlign - Return the minimum known alignment in bytes of the
+/// actual memory reference.
+Align MachineMemOperand::getAlign() const {
+  return commonAlignment(getBaseAlign(), getOffset());
 }
 
 void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
@@ -1148,8 +1151,8 @@
     }
   }
   MachineOperand::printOperandOffset(OS, getOffset());
-  if (getBaseAlignment() != getSize())
-    OS << ", align " << getBaseAlignment();
+  if (getBaseAlign() != getSize())
+    OS << ", align " << getBaseAlign().value();
   auto AAInfo = getAAInfo();
   if (AAInfo.TBAA) {
     OS << ", !tbaa ";
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -21342,16 +21342,16 @@
   // multiples of the size of the data.
   int64_t SrcValOffset0 = MUC0.MMO->getOffset();
   int64_t SrcValOffset1 = MUC1.MMO->getOffset();
-  unsigned OrigAlignment0 = MUC0.MMO->getBaseAlignment();
-  unsigned OrigAlignment1 = MUC1.MMO->getBaseAlignment();
+  Align OrigAlignment0 = MUC0.MMO->getBaseAlign();
+  Align OrigAlignment1 = MUC1.MMO->getBaseAlign();
   auto &Size0 = MUC0.NumBytes;
   auto &Size1 = MUC1.NumBytes;
   if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
       Size0.hasValue() && Size1.hasValue() && *Size0 == *Size1 &&
       OrigAlignment0 > *Size0 && SrcValOffset0 % *Size0 == 0 &&
       SrcValOffset1 % *Size1 == 0) {
-    int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0;
-    int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1;
+    int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0.value();
+    int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1.value();
 
     // There is no overlap between these relatively aligned accesses of
     // similar size. Return no alias.
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1557,7 +1557,7 @@
     LLVMContext &Context, const DataLayout &DL, EVT VT,
     const MachineMemOperand &MMO, bool *Fast) const {
   return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
-                                        MMO.getAlignment(), MMO.getFlags(),
+                                        MMO.getAlign().value(), MMO.getFlags(),
                                         Fast);
 }
 
@@ -1573,7 +1573,7 @@
                                             const MachineMemOperand &MMO,
                                             bool *Fast) const {
   return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
-                            MMO.getAlignment(), MMO.getFlags(), Fast);
+                            MMO.getAlign().value(), MMO.getFlags(), Fast);
 }
 
 BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -444,15 +444,15 @@
          AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
 
   // There are no extending SMRD/SMEM loads, and they require 4-byte alignment.
-  return MMO->getSize() >= 4 && MMO->getAlignment() >= 4 &&
-         // Can't do a scalar atomic load.
-         !MMO->isAtomic() &&
-         // Don't use scalar loads for volatile accesses to non-constant address
-         // spaces.
-         (IsConst || !MMO->isVolatile()) &&
-         // Memory must be known constant, or not written before this load.
-         (IsConst || MMO->isInvariant() || memOpHasNoClobbered(MMO)) &&
-         AMDGPUInstrInfo::isUniformMMO(MMO);
+  return MMO->getSize() >= 4 && MMO->getAlign() >= Align(4) &&
+         // Can't do a scalar atomic load.
+         !MMO->isAtomic() &&
+         // Don't use scalar loads for volatile accesses to non-constant address
+         // spaces.
+         (IsConst || !MMO->isVolatile()) &&
+         // Memory must be known constant, or not written before this load.
+         (IsConst || MMO->isInvariant() || memOpHasNoClobbered(MMO)) &&
+         AMDGPUInstrInfo::isUniformMMO(MMO);
 }
 
 RegisterBankInfo::InstructionMappings
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -3777,7 +3777,7 @@
   // If there are odd number of registers or if it's not 64-bit aligned,
   // then it takes an extra AGU (Address Generation Unit) cycle.
   if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
-      (*MI.memoperands_begin())->getAlignment() < 8)
+      (*MI.memoperands_begin())->getAlign() < Align(8))
     ++UOps;
   return UOps;
 }
@@ -4364,10 +4364,10 @@
     return -1;
 
   unsigned DefAlign = DefMI.hasOneMemOperand()
-                          ? (*DefMI.memoperands_begin())->getAlignment()
+                          ? (*DefMI.memoperands_begin())->getAlign().value()
                           : 0;
   unsigned UseAlign = UseMI.hasOneMemOperand()
-                          ? (*UseMI.memoperands_begin())->getAlignment()
+                          ? (*UseMI.memoperands_begin())->getAlign().value()
                           : 0;
 
   // Get the itinerary's latency if possible, and handle variable_ops.
@@ -4414,10 +4414,12 @@
   const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
   auto *DefMN = cast<MachineSDNode>(DefNode);
   unsigned DefAlign = !DefMN->memoperands_empty()
-                          ? (*DefMN->memoperands_begin())->getAlignment() : 0;
+                          ? (*DefMN->memoperands_begin())->getAlign().value()
+                          : 0;
   auto *UseMN = cast<MachineSDNode>(UseNode);
   unsigned UseAlign = !UseMN->memoperands_empty()
-                          ? (*UseMN->memoperands_begin())->getAlignment() : 0;
+                          ? (*UseMN->memoperands_begin())->getAlign().value()
+                          : 0;
   int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
                                   UseMCID, UseIdx, UseAlign);
 
@@ -4708,7 +4710,7 @@
 
   // Adjust for dynamic def-side opcode variants not captured by the itinerary.
   unsigned DefAlign =
-      MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlignment() : 0;
+      MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlign().value() : 0;
   int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign);
   if (Adj >= 0 || (int)Latency > -Adj) {
     return Latency + Adj;
diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
--- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1608,7 +1608,7 @@
 
   // Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
   // not.
-  if (MMO.getAlignment() < 4)
+  if (MMO.getAlign() < Align(4))
     return false;
 
   // str could probably be eliminated entirely, but for now we just want
@@ -2183,12 +2183,12 @@
       (*Op0->memoperands_begin())->isAtomic())
     return false;
 
-  unsigned Align = (*Op0->memoperands_begin())->getAlignment();
+  Align Alignment = (*Op0->memoperands_begin())->getAlign();
   const Function &Func = MF->getFunction();
-  unsigned ReqAlign = STI->hasV6Ops()
-      ? TD->getABITypeAlignment(Type::getInt64Ty(Func.getContext()))
-      : 8; // Pre-v6 need 8-byte align
-  if (Align < ReqAlign)
+  Align ReqAlign =
+      STI->hasV6Ops() ? TD->getABITypeAlign(Type::getInt64Ty(Func.getContext()))
+                      : Align(8); // Pre-v6 need 8-byte align
+  if (Alignment < ReqAlign)
     return false;
 
   // Then make sure the immediate offset fits.
diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
--- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -457,7 +457,7 @@
       return false;
 
     if (!MI->hasOneMemOperand() ||
-        (*MI->memoperands_begin())->getAlignment() < 4)
+        (*MI->memoperands_begin())->getAlign() < Align(4))
       return false;
 
     // We're creating a completely different type of load/store - LDM from LDR.
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1027,10 +1027,9 @@
   auto UseAligned = [&] (const MachineInstr &MI, unsigned NeedAlign) {
     if (MI.memoperands().empty())
       return false;
-    return all_of(MI.memoperands(),
-                  [NeedAlign] (const MachineMemOperand *MMO) {
-                    return NeedAlign <= MMO->getAlignment();
-                  });
+    return all_of(MI.memoperands(), [NeedAlign](const MachineMemOperand *MMO) {
+      return MMO->getAlign() >= NeedAlign;
+    });
   };
 
   switch (Opc) {
diff --git a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
--- a/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -688,11 +688,12 @@
   for (auto &MO : MI->memoperands()) {
     const MachinePointerInfo &Ptr = MO->getPointerInfo();
     MachineMemOperand::Flags F = MO->getFlags();
-    int A = MO->getAlignment();
+    Align A = MO->getAlign();
 
-    auto *Tmp1 = MF.getMachineMemOperand(Ptr, F, 4/*size*/, A);
+    auto *Tmp1 = MF.getMachineMemOperand(Ptr, F, 4 /*size*/, A.value());
     LowI->addMemOperand(MF, Tmp1);
-    auto *Tmp2 = MF.getMachineMemOperand(Ptr, F, 4/*size*/, std::min(A, 4));
+    auto *Tmp2 = MF.getMachineMemOperand(Ptr, F, 4 /*size*/,
+                                         std::min(A, Align(4)).value());
     HighI->addMemOperand(MF, Tmp2);
   }
 }
diff --git a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
--- a/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
@@ -314,7 +314,7 @@
   MachineInstr *FirstMI = *Begin;
   assert(!FirstMI->memoperands_empty() && "Expecting some memory operands");
   const MachineMemOperand &FirstMMO = getStoreTarget(FirstMI);
-  unsigned Alignment = FirstMMO.getAlignment();
+  unsigned Alignment = FirstMMO.getAlign().value();
   unsigned SizeAccum = FirstMMO.getSize();
   unsigned FirstOffset = getStoreOffset(FirstMI);
 
@@ -416,10 +416,9 @@
   MachineInstr *FirstSt = OG.front();
   DebugLoc DL = OG.back()->getDebugLoc();
   const MachineMemOperand &OldM = getStoreTarget(FirstSt);
-  MachineMemOperand *NewM =
-      MF->getMachineMemOperand(OldM.getPointerInfo(), OldM.getFlags(),
-                               TotalSize, OldM.getAlignment(),
-                               OldM.getAAInfo());
+  MachineMemOperand *NewM = MF->getMachineMemOperand(
+      OldM.getPointerInfo(), OldM.getFlags(), TotalSize,
+      OldM.getAlign().value(), OldM.getAAInfo());
 
   if (Acc < 0x10000) {
     // Create mem[hw] = #Acc
diff --git a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
--- a/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
+++ b/llvm/lib/Target/Mips/MipsInstructionSelector.cpp
@@ -462,7 +462,7 @@
     }
 
     // Unaligned memory access
-    if (MMO->getSize() > MMO->getAlignment() &&
+    if (MMO->getAlign() < MMO->getSize() &&
         !STI.systemSupportsUnalignedAccess()) {
       if (MMO->getSize() != 4 || !isRegInGprb(I.getOperand(0).getReg(), MRI))
         return false;
diff --git a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
--- a/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
+++ b/llvm/lib/Target/Mips/MipsPreLegalizerCombiner.cpp
@@ -52,7 +52,7 @@
         static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
     if (!isPowerOf2_64(MMO->getSize()))
       return false;
-    bool isUnaligned = MMO->getSize() > MMO->getAlignment();
+    bool isUnaligned = MMO->getAlign() < MMO->getSize();
     if (!STI.systemSupportsUnalignedAccess() && isUnaligned)
       return false;
 
diff --git a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
--- a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
@@ -157,7 +157,7 @@
     const MipsSubtarget &STI =
        static_cast<const MipsSubtarget &>(MI->getMF()->getSubtarget());
     if (MMO->getSize() == 4 && (!STI.systemSupportsUnalignedAccess() &&
-                                MMO->getSize() > MMO->getAlignment()))
+                                MMO->getAlign() < MMO->getSize()))
       return true;
   }
   return false;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -13650,8 +13650,8 @@
 
   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
   // aligned and the type is a vector with elements up to 4 bytes
-  if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
-      && VecTy.getScalarSizeInBits() <= 32 ) {
+  if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
+      VecTy.getScalarSizeInBits() <= 32) {
     return SDValue();
   }
 
@@ -13721,8 +13721,8 @@
 
   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the load is
   // aligned and the type is a vector with elements up to 4 bytes
-  if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment()%16)
-      && VecTy.getScalarSizeInBits() <= 32 ) {
+  if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
+      VecTy.getScalarSizeInBits() <= 32) {
     return SDValue();
   }
 
diff --git a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
--- a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -92,9 +92,9 @@
       return;
     const MachineMemOperand *MMO = *MI->memoperands_begin();
     unsigned AlignmentHint = 0;
-    if (MMO->getAlignment() >= 16)
+    if (MMO->getAlign() >= Align(16))
      AlignmentHint = 4;
-    else if (MMO->getAlignment() >= 8)
+    else if (MMO->getAlign() >= Align(8))
       AlignmentHint = 3;
     if (AlignmentHint == 0)
       return;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
@@ -65,7 +65,7 @@
     assert(MI.getDesc().OpInfo[OperandNo].OperandType ==
               WebAssembly::OPERAND_P2ALIGN &&
           "Load and store instructions should have a p2align operand");
-    uint64_t P2Align = Log2_64((*MI.memoperands_begin())->getAlignment());
+    uint64_t P2Align = Log2((*MI.memoperands_begin())->getAlign());
 
     // WebAssembly does not currently support supernatural alignment.
     P2Align = std::min(P2Align,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -5811,7 +5811,7 @@
   // Determine the alignment of the load.
   Align Alignment;
   if (LoadMI.hasOneMemOperand())
-    Alignment = Align((*LoadMI.memoperands_begin())->getAlignment());
+    Alignment = (*LoadMI.memoperands_begin())->getAlign();
   else
     switch (LoadMI.getOpcode()) {
     case X86::AVX512_512_SET0:
@@ -6092,7 +6092,7 @@
     Opc = getBroadcastOpcode(I, RC, Subtarget);
   } else {
     unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
-    bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
+    bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
     Opc = getLoadRegOpcode(Reg, RC, isAligned, Subtarget);
   }
 
@@ -6169,7 +6169,7 @@
   const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
   auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
   unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*DstRC), 16);
-  bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
+  bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
   unsigned Opc = getStoreRegOpcode(Reg, DstRC, isAligned, Subtarget);
   DebugLoc DL;
   MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
@@ -6236,7 +6236,7 @@
       Opc = getBroadcastOpcode(I, RC, Subtarget);
     } else {
       unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
-      bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
+      bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
       Opc = getLoadRegOpcode(0, RC, isAligned, Subtarget);
     }
 
@@ -6302,7 +6302,7 @@
     // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
     // memory access is slow above.
     unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
-    bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
+    bool isAligned = !MMOs.empty() && MMOs.front()->getAlign() >= Alignment;
     SDNode *Store =
         DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
                            dl, MVT::Other, AddrOps);
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -71,7 +71,7 @@
   // TODO: remove after supported by Tablegen-erated instruction selection.
   unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
-                          uint64_t Alignment) const;
+                          Align Alignment) const;
 
   bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                          MachineFunction &MF) const;
@@ -394,7 +394,7 @@
 unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                 const RegisterBank &RB,
                                                 unsigned Opc,
-                                                uint64_t Alignment) const {
+                                                Align Alignment) const {
   bool Isload = (Opc == TargetOpcode::G_LOAD);
   bool HasAVX = STI.hasAVX();
   bool HasAVX512 = STI.hasAVX512();
@@ -427,7 +427,7 @@
                         HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
   } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
-    if (Alignment >= 16)
+    if (Alignment >= Align(16))
       return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                               : HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX
@@ -446,7 +446,7 @@
                          ? X86::VMOVUPSZ128mr_NOVLX
                          : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
   } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
-    if (Alignment >= 32)
+    if (Alignment >= Align(32))
       return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                               : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                           : X86::VMOVAPSYrm)
@@ -461,7 +461,7 @@
                          : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                      : X86::VMOVUPSYmr);
   } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
-    if (Alignment >= 64)
+    if (Alignment >= Align(64))
       return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
     else
       return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
@@ -520,13 +520,13 @@
       LLVM_DEBUG(dbgs() << "Atomic ordering not supported yet\n");
       return false;
     }
-    if (MemOp.getAlignment() < Ty.getSizeInBits()/8) {
+    if (MemOp.getAlign() < Ty.getSizeInBits() / 8) {
       LLVM_DEBUG(dbgs() << "Unaligned atomics not supported yet\n");
       return false;
     }
   }
 
-  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
+  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlign());
   if (NewOpc == Opc)
     return false;
 
@@ -1435,14 +1435,16 @@
   const Register DstReg = I.getOperand(0).getReg();
   const LLT DstTy = MRI.getType(DstReg);
   const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
-  unsigned Align = DstTy.getSizeInBytes();
+  Align Alignment = Align(DstTy.getSizeInBytes());
   const DebugLoc &DbgLoc = I.getDebugLoc();
 
-  unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);
+  unsigned Opc =
+      getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Alignment);
 
   // Create the load from the constant pool.
   const ConstantFP *CFP = I.getOperand(1).getFPImm();
-  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
+  unsigned CPI =
+      MF.getConstantPool()->getConstantPoolIndex(CFP, Alignment.value());
   MachineInstr *LoadInst = nullptr;
   unsigned char OpFlag = STI.classifyLocalReference(nullptr);
 
@@ -1456,7 +1458,7 @@
 
     MachineMemOperand *MMO = MF.getMachineMemOperand(
         MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
-        MF.getDataLayout().getPointerSize(), Align);
+        MF.getDataLayout().getPointerSize(), Alignment.value());
 
     LoadInst =
         addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
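
For reference, the alignment rule this patch preserves: the new MachineMemOperand::getAlign() returns commonAlignment(getBaseAlign(), getOffset()), the largest power of two dividing both the base alignment and the byte offset, which is the same value the old MinAlign()-based getAlignment() produced. Below is a minimal self-contained sketch of that rule; commonAlignmentSketch is an illustrative stand-in written for this note, not LLVM's llvm::commonAlignment implementation.

#include <cassert>
#include <cstdint>

// Greatest power of two dividing both the base alignment and the offset;
// mirrors what llvm::commonAlignment(Align, uint64_t) computes.
static uint64_t commonAlignmentSketch(uint64_t BaseAlign, uint64_t Offset) {
  assert(BaseAlign != 0 && (BaseAlign & (BaseAlign - 1)) == 0 &&
         "base alignment must be a power of two");
  if (Offset == 0)
    return BaseAlign;
  uint64_t OffsetAlign = Offset & (~Offset + 1); // lowest set bit of Offset
  return BaseAlign < OffsetAlign ? BaseAlign : OffsetAlign;
}

int main() {
  // A 16-byte-aligned base accessed at offset 4 is only 4-byte aligned,
  // matching MachineMemOperand::getAlign() after this change.
  assert(commonAlignmentSketch(16, 0) == 16);
  assert(commonAlignmentSketch(16, 4) == 4);
  assert(commonAlignmentSketch(16, 24) == 8);
  return 0;
}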