diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -580,9 +580,9 @@
   bool isLegalMaskedLoad(Type *DataType) const;
 
   /// Return true if the target supports nontemporal store.
-  bool isLegalNTStore(Type *DataType, llvm::Align Alignment) const;
+  bool isLegalNTStore(Type *DataType, Align Alignment) const;
   /// Return true if the target supports nontemporal load.
-  bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) const;
+  bool isLegalNTLoad(Type *DataType, Align Alignment) const;
 
   /// Return true if the target supports masked scatter.
   bool isLegalMaskedScatter(Type *DataType) const;
@@ -1196,8 +1196,8 @@
   virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
   virtual bool isLegalMaskedStore(Type *DataType) = 0;
   virtual bool isLegalMaskedLoad(Type *DataType) = 0;
-  virtual bool isLegalNTStore(Type *DataType, llvm::Align Alignment) = 0;
-  virtual bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) = 0;
+  virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
+  virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
   virtual bool isLegalMaskedScatter(Type *DataType) = 0;
   virtual bool isLegalMaskedGather(Type *DataType) = 0;
   virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
@@ -1471,10 +1471,10 @@
   bool isLegalMaskedLoad(Type *DataType) override {
     return Impl.isLegalMaskedLoad(DataType);
   }
-  bool isLegalNTStore(Type *DataType, llvm::Align Alignment) override {
+  bool isLegalNTStore(Type *DataType, Align Alignment) override {
     return Impl.isLegalNTStore(DataType, Alignment);
   }
-  bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) override {
+  bool isLegalNTLoad(Type *DataType, Align Alignment) override {
     return Impl.isLegalNTLoad(DataType, Alignment);
   }
   bool isLegalMaskedScatter(Type *DataType) override {
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -247,14 +247,14 @@
   bool isLegalMaskedLoad(Type *DataType) { return false; }
 
-  bool isLegalNTStore(Type *DataType, llvm::Align Alignment) {
+  bool isLegalNTStore(Type *DataType, Align Alignment) {
     // By default, assume nontemporal memory stores are available for stores
     // that are aligned and have a size that is a power of 2.
     unsigned DataSize = DL.getTypeStoreSize(DataType);
     return Alignment >= DataSize && isPowerOf2_32(DataSize);
   }
 
-  bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) {
+  bool isLegalNTLoad(Type *DataType, Align Alignment) {
     // By default, assume nontemporal memory loads are available for loads that
     // are aligned and have a size that is a power of 2.
     unsigned DataSize = DL.getTypeStoreSize(DataType);
diff --git a/llvm/include/llvm/CodeGen/AsmPrinter.h b/llvm/include/llvm/CodeGen/AsmPrinter.h
--- a/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -350,7 +350,7 @@
   /// global value is specified, and if that global has an explicit alignment
   /// requested, it will override the alignment request if required for
   /// correctness.
-  void EmitAlignment(llvm::Align Align, const GlobalObject *GV = nullptr) const;
+  void EmitAlignment(Align Align, const GlobalObject *GV = nullptr) const;
 
   /// Lower the specified LLVM Constant to an MCExpr.
   virtual const MCExpr *lowerConstant(const Constant *CV);
@@ -643,8 +643,8 @@
   void EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;
 
   /// Return the alignment for the specified \p GV.
-  static llvm::Align getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
-                                    llvm::Align InAlign = llvm::Align::None());
+  static Align getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
+                              Align InAlign = Align::None());
 
 private:
   /// Private state for PrintSpecial()
diff --git a/llvm/include/llvm/CodeGen/CallingConvLower.h b/llvm/include/llvm/CodeGen/CallingConvLower.h
--- a/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -424,7 +424,7 @@
   /// AllocateStack - Allocate a chunk of stack space with the specified size
   /// and alignment.
   unsigned AllocateStack(unsigned Size, unsigned Alignment) {
-    const llvm::Align Align(Alignment);
+    const Align Align(Alignment);
     StackOffset = alignTo(StackOffset, Align);
     unsigned Result = StackOffset;
     StackOffset += Size;
@@ -433,7 +433,7 @@
     return Result;
   }
 
-  void ensureMaxAlignment(llvm::Align Align) {
+  void ensureMaxAlignment(Align Align) {
     if (!AnalyzingMustTailForwardedRegs)
       MF.getFrameInfo().ensureMaxAlignment(Align.value());
   }
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -105,7 +105,7 @@
   /// Alignment of the basic block. One if the basic block does not need to be
   /// aligned.
-  llvm::Align Alignment;
+  Align Alignment;
 
   /// Indicate that this basic block is entered via an exception handler.
   bool IsEHPad = false;
@@ -373,10 +373,10 @@
   const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;
 
   /// Return alignment of the basic block.
-  llvm::Align getAlignment() const { return Alignment; }
+  Align getAlignment() const { return Alignment; }
 
   /// Set alignment of the basic block.
-  void setAlignment(llvm::Align A) { Alignment = A; }
+  void setAlignment(Align A) { Alignment = A; }
 
   /// Returns true if the block is a landing pad. That is this basic block is
   /// entered via an exception handler.
diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
--- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -181,7 +181,7 @@
     uint8_t SSPLayout;
 
-    StackObject(uint64_t Size, llvm::Align Alignment, int64_t SPOffset,
+    StackObject(uint64_t Size, Align Alignment, int64_t SPOffset,
                 bool IsImmutable, bool IsSpillSlot, const AllocaInst *Alloca,
                 bool IsAliased, uint8_t StackID = 0)
         : SPOffset(SPOffset), Size(Size), Alignment(Alignment),
@@ -564,7 +564,7 @@
   unsigned getMaxAlignment() const { return MaxAlignment.value(); }
 
   /// Make sure the function is at least Align bytes aligned.
-  void ensureMaxAlignment(llvm::Align Align);
+  void ensureMaxAlignment(Align Align);
   /// FIXME: Remove this once transition to Align is over.
   inline void ensureMaxAlignment(unsigned Align) {
     ensureMaxAlignment(assumeAligned(Align));
   }
@@ -732,9 +732,9 @@
   /// Create a new statically sized stack object, returning
   /// a nonnegative identifier to represent it.
-  int CreateStackObject(uint64_t Size, llvm::Align Alignment, bool isSpillSlot,
+  int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot,
                         const AllocaInst *Alloca = nullptr, uint8_t ID = 0);
-  /// FIXME: Remove this function when transition to llvm::Align is over.
+  /// FIXME: Remove this function when transition to Align is over.
   inline int CreateStackObject(uint64_t Size, unsigned Alignment,
                                bool isSpillSlot,
                                const AllocaInst *Alloca = nullptr,
@@ -745,8 +745,8 @@
   /// Create a new statically sized stack object that represents a spill slot,
   /// returning a nonnegative identifier to represent it.
-  int CreateSpillStackObject(uint64_t Size, llvm::Align Alignment);
-  /// FIXME: Remove this function when transition to llvm::Align is over.
+  int CreateSpillStackObject(uint64_t Size, Align Alignment);
+  /// FIXME: Remove this function when transition to Align is over.
   inline int CreateSpillStackObject(uint64_t Size, unsigned Alignment) {
     return CreateSpillStackObject(Size, assumeAligned(Alignment));
   }
@@ -760,9 +760,8 @@
   /// Notify the MachineFrameInfo object that a variable sized object has been
   /// created. This must be created whenever a variable sized object is
   /// created, whether or not the index returned is actually used.
-  int CreateVariableSizedObject(llvm::Align Alignment,
-                                const AllocaInst *Alloca);
-  /// FIXME: Remove this function when transition to llvm::Align is over.
+  int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca);
+  /// FIXME: Remove this function when transition to Align is over.
   int CreateVariableSizedObject(unsigned Alignment, const AllocaInst *Alloca) {
     return CreateVariableSizedObject(assumeAligned(Alignment), Alloca);
   }
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -277,7 +277,7 @@
   unsigned FunctionNumber;
 
   /// Alignment - The alignment of the function.
-  llvm::Align Alignment;
+  Align Alignment;
 
   /// ExposesReturnsTwice - True if the function calls setjmp or related
   /// functions with attribute "returns twice", but doesn't have
@@ -509,13 +509,13 @@
   WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
 
   /// getAlignment - Return the alignment of the function.
-  llvm::Align getAlignment() const { return Alignment; }
+  Align getAlignment() const { return Alignment; }
 
   /// setAlignment - Set the alignment of the function.
-  void setAlignment(llvm::Align A) { Alignment = A; }
+  void setAlignment(Align A) { Alignment = A; }
 
   /// ensureAlignment - Make sure the function is at least A bytes aligned.
-  void ensureAlignment(llvm::Align A) {
+  void ensureAlignment(Align A) {
     if (Alignment < A)
       Alignment = A;
   }
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1597,18 +1597,18 @@
   }
 
   /// Return the minimum stack alignment of an argument.
-  llvm::Align getMinStackArgumentAlignment() const {
+  Align getMinStackArgumentAlignment() const {
     return MinStackArgumentAlignment;
   }
 
   /// Return the minimum function alignment.
-  llvm::Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
+  Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
 
   /// Return the preferred function alignment.
-  llvm::Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
+  Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
 
   /// Return the preferred loop alignment.
-  virtual llvm::Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
+  virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
     return PrefLoopAlignment;
   }
@@ -2120,23 +2120,19 @@
   }
 
   /// Set the target's minimum function alignment.
-  void setMinFunctionAlignment(llvm::Align Align) {
-    MinFunctionAlignment = Align;
-  }
+  void setMinFunctionAlignment(Align Align) { MinFunctionAlignment = Align; }
 
   /// Set the target's preferred function alignment. This should be set if
   /// there is a performance benefit to higher-than-minimum alignment
-  void setPrefFunctionAlignment(llvm::Align Align) {
-    PrefFunctionAlignment = Align;
-  }
+  void setPrefFunctionAlignment(Align Align) { PrefFunctionAlignment = Align; }
 
   /// Set the target's preferred loop alignment. Default alignment is one, it
   /// means the target does not care about loop alignment. The target may also
   /// override getPrefLoopAlignment to provide per-loop values.
-  void setPrefLoopAlignment(llvm::Align Align) { PrefLoopAlignment = Align; }
+  void setPrefLoopAlignment(Align Align) { PrefLoopAlignment = Align; }
 
   /// Set the minimum stack alignment of an argument.
-  void setMinStackArgumentAlignment(llvm::Align Align) {
+  void setMinStackArgumentAlignment(Align Align) {
     MinStackArgumentAlignment = Align;
   }
@@ -2699,18 +2695,18 @@
   Sched::Preference SchedPreferenceInfo;
 
   /// The minimum alignment that any argument on the stack needs to have.
-  llvm::Align MinStackArgumentAlignment;
+  Align MinStackArgumentAlignment;
 
   /// The minimum function alignment (used when optimizing for size, and to
   /// prevent explicitly provided alignment from leading to incorrect code).
-  llvm::Align MinFunctionAlignment;
+  Align MinFunctionAlignment;
 
   /// The preferred function alignment (used when alignment unspecified and
   /// optimizing for speed).
-  llvm::Align PrefFunctionAlignment;
+  Align PrefFunctionAlignment;
 
   /// The preferred loop alignment (in log2, not in bytes).
-  llvm::Align PrefLoopAlignment;
+  Align PrefLoopAlignment;
 
   /// Size in bits of the maximum atomics size the backend supports.
   /// Accesses larger than this will be expanded by AtomicExpandPass.
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -72,11 +72,11 @@
   /// Alignment type from \c AlignTypeEnum
   unsigned AlignType : 8;
   unsigned TypeBitWidth : 24;
-  llvm::Align ABIAlign;
-  llvm::Align PrefAlign;
+  Align ABIAlign;
+  Align PrefAlign;
 
-  static LayoutAlignElem get(AlignTypeEnum align_type, llvm::Align abi_align,
-                             llvm::Align pref_align, uint32_t bit_width);
+  static LayoutAlignElem get(AlignTypeEnum align_type, Align abi_align,
+                             Align pref_align, uint32_t bit_width);
 
   bool operator==(const LayoutAlignElem &rhs) const;
 };
@@ -88,15 +88,15 @@
 /// \note The unusual order of elements in the structure attempts to reduce
 /// padding and make the structure slightly more cache friendly.
 struct PointerAlignElem {
-  llvm::Align ABIAlign;
-  llvm::Align PrefAlign;
+  Align ABIAlign;
+  Align PrefAlign;
   uint32_t TypeByteWidth;
   uint32_t AddressSpace;
   uint32_t IndexWidth;
 
   /// Initializer
-  static PointerAlignElem get(uint32_t AddressSpace, llvm::Align ABIAlign,
-                              llvm::Align PrefAlign, uint32_t TypeByteWidth,
+  static PointerAlignElem get(uint32_t AddressSpace, Align ABIAlign,
+                              Align PrefAlign, uint32_t TypeByteWidth,
                               uint32_t IndexWidth);
 
   bool operator==(const PointerAlignElem &rhs) const;
@@ -173,16 +173,15 @@
   /// well-defined bitwise representation.
   SmallVector<unsigned, 8> NonIntegralAddressSpaces;
 
-  void setAlignment(AlignTypeEnum align_type, llvm::Align abi_align,
-                    llvm::Align pref_align, uint32_t bit_width);
-  llvm::Align getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
-                               bool ABIAlign, Type *Ty) const;
-  void setPointerAlignment(uint32_t AddrSpace, llvm::Align ABIAlign,
-                           llvm::Align PrefAlign, uint32_t TypeByteWidth,
-                           uint32_t IndexWidth);
+  void setAlignment(AlignTypeEnum align_type, Align abi_align, Align pref_align,
+                    uint32_t bit_width);
+  Align getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
+                         bool ABIAlign, Type *Ty) const;
+  void setPointerAlignment(uint32_t AddrSpace, Align ABIAlign, Align PrefAlign,
+                           uint32_t TypeByteWidth, uint32_t IndexWidth);
 
   /// Internal helper method that returns requested alignment for type.
-  llvm::Align getAlignment(Type *Ty, bool abi_or_pref) const;
+  Align getAlignment(Type *Ty, bool abi_or_pref) const;
 
   /// Parses a target data specification string. Assert if the string is
   /// malformed.
@@ -262,11 +261,11 @@
   bool isIllegalInteger(uint64_t Width) const { return !isLegalInteger(Width); }
 
   /// Returns true if the given alignment exceeds the natural stack alignment.
-  bool exceedsNaturalStackAlignment(llvm::Align Align) const {
+  bool exceedsNaturalStackAlignment(Align Align) const {
     return StackNaturalAlign && (Align > StackNaturalAlign);
   }
 
-  llvm::Align getStackAlignment() const {
+  Align getStackAlignment() const {
     assert(StackNaturalAlign && "StackNaturalAlign must be defined");
     return *StackNaturalAlign;
   }
@@ -349,12 +348,12 @@
   }
 
   /// Layout pointer alignment
-  llvm::Align getPointerABIAlignment(unsigned AS) const;
+  Align getPointerABIAlignment(unsigned AS) const;
 
   /// Return target's alignment for stack-based pointers
   /// FIXME: The defaults need to be removed once all of
   /// the backends/clients are updated.
-  llvm::Align getPointerPrefAlignment(unsigned AS = 0) const;
+  Align getPointerPrefAlignment(unsigned AS = 0) const;
 
   /// Layout pointer size
   /// FIXME: The defaults need to be removed once all of
@@ -490,7 +489,7 @@
   /// Returns the minimum ABI-required alignment for an integer type of
   /// the specified bitwidth.
-  llvm::Align getABIIntegerTypeAlignment(unsigned BitWidth) const;
+  Align getABIIntegerTypeAlignment(unsigned BitWidth) const;
 
   /// Returns the preferred stack/global alignment for the specified
   /// type.
@@ -562,7 +561,7 @@
 /// based on the DataLayout structure.
 class StructLayout {
   uint64_t StructSize;
-  llvm::Align StructAlignment;
+  Align StructAlignment;
   unsigned IsPadded : 1;
   unsigned NumElements : 31;
   uint64_t MemberOffsets[1]; // variable sized array!
@@ -572,7 +571,7 @@
 
   uint64_t getSizeInBits() const { return 8 * StructSize; }
 
-  llvm::Align getAlignment() const { return StructAlignment; }
+  Align getAlignment() const { return StructAlignment; }
 
   /// Returns whether the struct has padding or not between its fields.
   /// NB: Padding in nested element is not taken into account.
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -114,9 +114,9 @@
       return MA->value();
     return 0;
   }
-  // FIXME: Remove once migration to llvm::Align is over.
+  // FIXME: Remove once migration to Align is over.
   void setAlignment(unsigned Align);
-  void setAlignment(llvm::MaybeAlign Align);
+  void setAlignment(MaybeAlign Align);
 
   /// Return true if this alloca is in the entry block of the function and is a
   /// constant size. If so, the code generator will fold it into the
@@ -248,9 +248,9 @@
     return 0;
   }
 
-  // FIXME: Remove once migration to llvm::Align is over.
+  // FIXME: Remove once migration to Align is over.
   void setAlignment(unsigned Align);
-  void setAlignment(llvm::MaybeAlign Align);
+  void setAlignment(MaybeAlign Align);
 
   /// Returns the ordering constraint of this load instruction.
   AtomicOrdering getOrdering() const {
@@ -378,9 +378,9 @@
     return 0;
   }
 
-  // FIXME: Remove once migration to llvm::Align is over.
+  // FIXME: Remove once migration to Align is over.
   void setAlignment(unsigned Align);
-  void setAlignment(llvm::MaybeAlign Align);
+  void setAlignment(MaybeAlign Align);
 
   /// Returns the ordering constraint of this store instruction.
   AtomicOrdering getOrdering() const {
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -59,7 +59,7 @@
   MCSymbol *Begin;
   MCSymbol *End = nullptr;
   /// The alignment requirement of this section.
-  llvm::Align Alignment;
+  Align Alignment;
   /// The section index in the assemblers section list.
   unsigned Ordinal = 0;
   /// The index of this section in the layout order.
@@ -119,7 +119,7 @@
   bool hasEnded() const;
 
   unsigned getAlignment() const { return Alignment.value(); }
-  void setAlignment(llvm::Align Value) { Alignment = Value; }
+  void setAlignment(Align Value) { Alignment = Value; }
 
   unsigned getOrdinal() const { return Ordinal; }
   void setOrdinal(unsigned Value) { Ordinal = Value; }
diff --git a/llvm/include/llvm/Support/Alignment.h b/llvm/include/llvm/Support/Alignment.h
--- a/llvm/include/llvm/Support/Alignment.h
+++ b/llvm/include/llvm/Support/Alignment.h
@@ -76,7 +76,7 @@
   /// Returns a default constructed Align which corresponds to no alignment.
   /// This is useful to test for unalignment as it conveys clear semantics.
-  /// `if (A != llvm::Align::None())`
+  /// `if (A != Align::None())`
   /// would be better than
   /// `if (A > llvm::Align(1))`
   constexpr static const Align None() { return llvm::Align(); }
@@ -142,7 +142,7 @@
 /// Returns the offset to the next integer (mod 2**64) that is greater than
 /// or equal to \p Value and is a multiple of \p Align.
-inline uint64_t offsetToAlignment(uint64_t Value, llvm::Align Align) {
+inline uint64_t offsetToAlignment(uint64_t Value, Align Align) {
   return alignTo(Value, Align) - Value;
 }
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -302,12 +302,11 @@
 }
 
 bool TargetTransformInfo::isLegalNTStore(Type *DataType,
-                                         llvm::Align Alignment) const {
+                                         Align Alignment) const {
   return TTIImpl->isLegalNTStore(DataType, Alignment);
 }
 
-bool TargetTransformInfo::isLegalNTLoad(Type *DataType,
-                                        llvm::Align Alignment) const {
+bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
   return TTIImpl->isLegalNTLoad(DataType, Alignment);
 }
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -163,10 +163,9 @@
 /// getGVAlignment - Return the alignment to use for the specified global
 /// value. This rounds up to the preferred alignment if possible and legal.
-llvm::Align AsmPrinter::getGVAlignment(const GlobalValue *GV,
-                                       const DataLayout &DL,
-                                       llvm::Align InAlign) {
-  llvm::Align Align;
+Align AsmPrinter::getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
+                                 Align InAlign) {
+  Align Align;
   if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
     Align = llvm::Align(DL.getPreferredAlignment(GVar));
@@ -175,7 +174,7 @@
   Align = InAlign;
 
   // If the GV has a specified alignment, take it into account.
-  const llvm::MaybeAlign GVAlign(GV->getAlignment());
+  const MaybeAlign GVAlign(GV->getAlignment());
   if (!GVAlign)
     return Align;
@@ -507,7 +506,7 @@
   // If the alignment is specified, we *must* obey it. Overaligning a global
   // with a specified alignment is a prompt way to break globals emitted to
   // sections and expected to be contiguous (e.g. ObjC metadata).
-  const llvm::Align Align = getGVAlignment(GV, DL);
+  const Align Align = getGVAlignment(GV, DL);
 
   for (const HandlerInfo &HI : Handlers) {
     NamedRegionTimer T(HI.TimerName, HI.TimerDescription,
@@ -2026,7 +2025,7 @@
   llvm::stable_sort(Structors, [](const Structor &L, const Structor &R) {
     return L.Priority < R.Priority;
   });
-  const llvm::Align Align = DL.getPointerPrefAlignment();
+  const Align Align = DL.getPointerPrefAlignment();
   for (Structor &S : Structors) {
     const TargetLoweringObjectFile &Obj = getObjFileLowering();
     const MCSymbol *KeySym = nullptr;
@@ -2150,8 +2149,7 @@
 // two boundary. If a global value is specified, and if that global has
 // an explicit alignment requested, it will override the alignment request
 // if required for correctness.
-void AsmPrinter::EmitAlignment(llvm::Align Align,
-                               const GlobalObject *GV) const {
+void AsmPrinter::EmitAlignment(Align Align, const GlobalObject *GV) const {
   if (GV)
     Align = getGVAlignment(GV, GV->getParent()->getDataLayout(), Align);
@@ -2936,8 +2934,8 @@
   }
 
   // Emit an alignment directive for this block, if needed.
-  const llvm::Align Align = MBB.getAlignment();
-  if (Align != llvm::Align::None())
+  const Align Align = MBB.getAlignment();
+  if (Align != Align::None())
     EmitAlignment(Align);
   MCCodePaddingContext Context;
   setupCodePaddingContext(MBB, Context);
diff --git a/llvm/lib/CodeGen/BranchRelaxation.cpp b/llvm/lib/CodeGen/BranchRelaxation.cpp
--- a/llvm/lib/CodeGen/BranchRelaxation.cpp
+++ b/llvm/lib/CodeGen/BranchRelaxation.cpp
@@ -65,17 +65,17 @@
   /// block.
   unsigned postOffset(const MachineBasicBlock &MBB) const {
     const unsigned PO = Offset + Size;
-    const llvm::Align Align = MBB.getAlignment();
-    if (Align == 1)
+    const Align Alignment = MBB.getAlignment();
+    if (Alignment == 1)
       return PO;
-    const llvm::Align ParentAlign = MBB.getParent()->getAlignment();
-    if (Align <= ParentAlign)
-      return PO + offsetToAlignment(PO, Align);
+    const Align ParentAlign = MBB.getParent()->getAlignment();
+    if (Alignment <= ParentAlign)
+      return PO + offsetToAlignment(PO, Alignment);
 
     // The alignment of this MBB is larger than the function's alignment, so we
     // can't tell whether or not it will insert nops. Assume that it will.
-    return PO + Align.value() + offsetToAlignment(PO, Align);
+    return PO + Alignment.value() + offsetToAlignment(PO, Alignment);
   }
 };
diff --git a/llvm/lib/CodeGen/CallingConvLower.cpp b/llvm/lib/CodeGen/CallingConvLower.cpp
--- a/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -43,8 +43,8 @@
 void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo, int MinSize,
                           int MinAlignment, ISD::ArgFlagsTy ArgFlags) {
-  llvm::Align MinAlign(MinAlignment);
-  llvm::Align Align(ArgFlags.getByValAlign());
+  Align MinAlign(MinAlignment);
+  Align Align(ArgFlags.getByValAlign());
   unsigned Size = ArgFlags.getByValSize();
   if (MinSize > (int)Size)
     Size = MinSize;
@@ -198,7 +198,7 @@
 void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                           MVT VT, CCAssignFn Fn) {
   unsigned SavedStackOffset = StackOffset;
-  llvm::Align SavedMaxStackArgAlign = MaxStackArgAlign;
+  Align SavedMaxStackArgAlign = MaxStackArgAlign;
   unsigned NumLocs = Locs.size();
 
   // Set the 'inreg' flag if it is used for this calling convention.
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -629,7 +629,7 @@
     OS << "landing-pad";
     HasAttributes = true;
   }
-  if (MBB.getAlignment() != llvm::Align::None()) {
+  if (MBB.getAlignment() != Align::None()) {
     OS << (HasAttributes ? ", " : " (");
     OS << "align " << MBB.getAlignment().value();
     HasAttributes = true;
diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp
--- a/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -326,7 +326,7 @@
     OS << "landing-pad";
     HasAttributes = true;
   }
-  if (getAlignment() != llvm::Align::None()) {
+  if (getAlignment() != Align::None()) {
     OS << (HasAttributes ? ", " : " (");
     OS << "align " << Log2(getAlignment());
     HasAttributes = true;
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -2807,7 +2807,7 @@
     if (!L)
       continue;
 
-    const llvm::Align Align = TLI->getPrefLoopAlignment(L);
+    const Align Align = TLI->getPrefLoopAlignment(L);
     if (Align == 1)
       continue; // Don't care about loop alignment.
diff --git a/llvm/lib/CodeGen/MachineFrameInfo.cpp b/llvm/lib/CodeGen/MachineFrameInfo.cpp
--- a/llvm/lib/CodeGen/MachineFrameInfo.cpp
+++ b/llvm/lib/CodeGen/MachineFrameInfo.cpp
@@ -28,7 +28,7 @@
 
 using namespace llvm;
 
-void MachineFrameInfo::ensureMaxAlignment(llvm::Align Align) {
+void MachineFrameInfo::ensureMaxAlignment(Align Align) {
   if (!StackRealignable)
     assert(Align <= StackAlignment &&
            "For targets without stack realignment, Align is out of limit!");
@@ -36,18 +36,17 @@
 }
 
 /// Clamp the alignment if requested and emit a warning.
-static inline llvm::Align clampStackAlignment(bool ShouldClamp,
-                                              llvm::Align Align,
-                                              llvm::Align StackAlign) {
-  if (!ShouldClamp || Align <= StackAlign)
-    return Align;
-  LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Align.value()
+static inline Align clampStackAlignment(bool ShouldClamp, Align Alignment,
+                                        Align StackAlign) {
+  if (!ShouldClamp || Alignment <= StackAlign)
+    return Alignment;
+  LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Alignment.value()
                     << " exceeds the stack alignment " << StackAlign.value()
                     << " when stack realignment is off" << '\n');
   return StackAlign;
 }
 
-int MachineFrameInfo::CreateStackObject(uint64_t Size, llvm::Align Alignment,
+int MachineFrameInfo::CreateStackObject(uint64_t Size, Align Alignment,
                                         bool IsSpillSlot,
                                         const AllocaInst *Alloca,
                                         uint8_t StackID) {
@@ -62,8 +61,7 @@
   return Index;
 }
 
-int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
-                                             llvm::Align Alignment) {
+int MachineFrameInfo::CreateSpillStackObject(uint64_t Size, Align Alignment) {
   Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
   CreateStackObject(Size, Alignment, true);
   int Index = (int)Objects.size() - NumFixedObjects - 1;
@@ -71,7 +69,7 @@
   return Index;
 }
 
-int MachineFrameInfo::CreateVariableSizedObject(llvm::Align Alignment,
+int MachineFrameInfo::CreateVariableSizedObject(Align Alignment,
                                                 const AllocaInst *Alloca) {
   HasVarSizedObjects = true;
   Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
@@ -89,8 +87,8 @@
   // object is 16-byte aligned. Note that unlike the non-fixed case, if the
   // stack needs realignment, we can't assume that the stack will in fact be
   // aligned.
-  llvm::Align Alignment = commonAlignment(
-      ForcedRealign ? llvm::Align::None() : StackAlignment, SPOffset);
+  Align Alignment =
+      commonAlignment(ForcedRealign ? Align::None() : StackAlignment, SPOffset);
   Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
   Objects.insert(Objects.begin(),
                  StackObject(Size, Alignment, SPOffset, IsImmutable,
@@ -102,8 +100,8 @@
 int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
                                                   int64_t SPOffset,
                                                   bool IsImmutable) {
-  llvm::Align Alignment = commonAlignment(
-      ForcedRealign ? llvm::Align::None() : StackAlignment, SPOffset);
+  Align Alignment =
+      commonAlignment(ForcedRealign ? Align::None() : StackAlignment, SPOffset);
   Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
   Objects.insert(Objects.begin(),
                  StackObject(Size, Alignment, SPOffset, IsImmutable,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1898,7 +1898,7 @@
   EVT VT = Node->getValueType(0);
   SDValue Tmp1 = Node->getOperand(0);
   SDValue Tmp2 = Node->getOperand(1);
-  const llvm::MaybeAlign MA(Node->getConstantOperandVal(3));
+  const MaybeAlign MA(Node->getConstantOperandVal(3));
 
   SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
                                Tmp2, MachinePointerInfo(V));
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -51,7 +51,7 @@
   // Loop over each of the elements, placing them in memory.
   for (unsigned i = 0, e = NumElements; i != e; ++i) {
     Type *Ty = ST->getElementType(i);
-    const llvm::Align TyAlign(ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty));
+    const Align TyAlign(ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty));
 
     // Add padding if necessary to align the data element properly.
     if (!isAligned(TyAlign, StructSize)) {
@@ -98,10 +98,8 @@
 // LayoutAlignElem, LayoutAlign support
 //===----------------------------------------------------------------------===//
 
-LayoutAlignElem LayoutAlignElem::get(AlignTypeEnum align_type,
-                                     llvm::Align abi_align,
-                                     llvm::Align pref_align,
-                                     uint32_t bit_width) {
+LayoutAlignElem LayoutAlignElem::get(AlignTypeEnum align_type, Align abi_align,
+                                     Align pref_align, uint32_t bit_width) {
   assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
   LayoutAlignElem retval;
   retval.AlignType = align_type;
@@ -123,10 +121,8 @@
 // PointerAlignElem, PointerAlign support
 //===----------------------------------------------------------------------===//
 
-PointerAlignElem PointerAlignElem::get(uint32_t AddressSpace,
-                                       llvm::Align ABIAlign,
-                                       llvm::Align PrefAlign,
-                                       uint32_t TypeByteWidth,
+PointerAlignElem PointerAlignElem::get(uint32_t AddressSpace, Align ABIAlign,
+                                       Align PrefAlign, uint32_t TypeByteWidth,
                                        uint32_t IndexWidth) {
   assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!");
   PointerAlignElem retval;
@@ -486,8 +482,8 @@
   });
 }
 
-void DataLayout::setAlignment(AlignTypeEnum align_type, llvm::Align abi_align,
-                              llvm::Align pref_align, uint32_t bit_width) {
+void DataLayout::setAlignment(AlignTypeEnum align_type, Align abi_align,
+                              Align pref_align, uint32_t bit_width) {
   // AlignmentsTy::ABIAlign and AlignmentsTy::PrefAlign were once stored as
   // uint16_t, it is unclear if there are requirements for alignment to be less
   // than 2^16 other than storage. In the meantime we leave the restriction as
@@ -520,9 +516,8 @@
   });
 }
 
-void DataLayout::setPointerAlignment(uint32_t AddrSpace, llvm::Align ABIAlign,
-                                     llvm::Align PrefAlign,
-                                     uint32_t TypeByteWidth,
+void DataLayout::setPointerAlignment(uint32_t AddrSpace, Align ABIAlign,
+                                     Align PrefAlign, uint32_t TypeByteWidth,
                                      uint32_t IndexWidth) {
   if (PrefAlign < ABIAlign)
     report_fatal_error(
@@ -542,9 +537,8 @@
 /// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
 /// preferred if ABIInfo = false) the layout wants for the specified datatype.
-llvm::Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
-                                         uint32_t BitWidth, bool ABIInfo,
-                                         Type *Ty) const {
+Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType, uint32_t BitWidth,
+                                   bool ABIInfo, Type *Ty) const {
   AlignmentsTy::const_iterator I = findAlignmentLowerBound(AlignType, BitWidth);
   // See if we found an exact match. Or if we are looking for an integer type,
   // but don't have an exact match, take the next largest integer. This is where
@@ -638,7 +632,7 @@
   return L;
 }
 
-llvm::Align DataLayout::getPointerABIAlignment(unsigned AS) const {
+Align DataLayout::getPointerABIAlignment(unsigned AS) const {
   PointersTy::const_iterator I = findPointerLowerBound(AS);
   if (I == Pointers.end() || I->AddressSpace != AS) {
     I = findPointerLowerBound(0);
@@ -647,7 +641,7 @@
   return I->ABIAlign;
 }
 
-llvm::Align DataLayout::getPointerPrefAlignment(unsigned AS) const {
+Align DataLayout::getPointerPrefAlignment(unsigned AS) const {
   PointersTy::const_iterator I = findPointerLowerBound(AS);
   if (I == Pointers.end() || I->AddressSpace != AS) {
     I = findPointerLowerBound(0);
@@ -704,7 +698,7 @@
   Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
   == false) for the requested type \a Ty.
 */
-llvm::Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
+Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
   AlignTypeEnum AlignType;
 
   assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
@@ -723,12 +717,11 @@
   case Type::StructTyID: {
     // Packed structure types always have an ABI alignment of one.
     if (cast<StructType>(Ty)->isPacked() && abi_or_pref)
-      return llvm::Align::None();
+      return Align::None();
 
     // Get the layout annotation... which is lazily created on demand.
     const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
-    const llvm::Align Align =
-        getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
+    const Align Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
     return std::max(Align, Layout->getAlignment());
   }
   case Type::IntegerTyID:
@@ -761,7 +754,7 @@
 
 /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
 /// an integer type of the specified bitwidth.
-llvm::Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
+Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
   return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, nullptr);
 }
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1248,7 +1248,7 @@
   setAlignment(llvm::MaybeAlign(Align));
 }
 
-void AllocaInst::setAlignment(llvm::MaybeAlign Align) {
+void AllocaInst::setAlignment(MaybeAlign Align) {
   assert((!Align || *Align <= MaximumAlignment) &&
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
@@ -1343,7 +1343,7 @@
   setAlignment(llvm::MaybeAlign(Align));
 }
 
-void LoadInst::setAlignment(llvm::MaybeAlign Align) {
+void LoadInst::setAlignment(MaybeAlign Align) {
   assert((!Align || *Align <= MaximumAlignment) &&
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
@@ -1430,7 +1430,7 @@
   setAlignment(llvm::MaybeAlign(Align));
 }
 
-void StoreInst::setAlignment(llvm::MaybeAlign Align) {
+void StoreInst::setAlignment(MaybeAlign Align) {
   assert((!Align || *Align <= MaximumAlignment) &&
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -667,7 +667,7 @@
   assert(getType()->isPointerTy() && "must be pointer");
   if (auto *GO = dyn_cast<GlobalObject>(this)) {
     if (isa<Function>(GO)) {
-      const llvm::MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
+      const MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
       const unsigned Align = FunctionPtrAlign ? FunctionPtrAlign->value() : 0;
       switch (DL.getFunctionPtrAlignType()) {
       case DataLayout::FunctionPtrAlignType::Independent:
diff --git a/llvm/lib/Object/ArchiveWriter.cpp b/llvm/lib/Object/ArchiveWriter.cpp
--- a/llvm/lib/Object/ArchiveWriter.cpp
+++ b/llvm/lib/Object/ArchiveWriter.cpp
@@ -308,7 +308,7 @@
   // least 4-byte aligned for 32-bit content. Opt for the larger encoding
   // uniformly.
   // We do this for all bsd formats because it simplifies aligning members.
-  const llvm::Align Alignment(isBSDLike(Kind) ? 8 : 2);
+  const Align Alignment(isBSDLike(Kind) ? 8 : 2);
   unsigned Pad = offsetToAlignment(Size, Alignment);
   Size += Pad;
diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp
@@ -40,10 +40,10 @@
                                  MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
                                  CCState &State, unsigned SlotAlign) {
   unsigned Size = LocVT.getSizeInBits() / 8;
-  const llvm::Align StackAlign =
+  const Align StackAlign =
       State.getMachineFunction().getDataLayout().getStackAlignment();
-  const llvm::Align OrigAlign(ArgFlags.getOrigAlign());
-  const llvm::Align Align = std::min(OrigAlign, StackAlign);
+  const Align OrigAlign(ArgFlags.getOrigAlign());
+  const Align Align = std::min(OrigAlign, StackAlign);
 
   for (auto &It : PendingMembers) {
     It.convertToMem(State.AllocateStack(
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -384,7 +384,7 @@
                                  unsigned Depth = 0) const override;
   AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
 
-  llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;
+  Align getPrefLoopAlignment(MachineLoop *ML) const override;
 
   void allocateHSAUserSGPRs(CCState &CCInfo,
                             MachineFunction &MF,
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10684,9 +10684,9 @@
   Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex());
 }
 
-llvm::Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
-  const llvm::Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
-  const llvm::Align CacheLineAlign = llvm::Align(64);
+Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+  const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML);
+  const Align CacheLineAlign = llvm::Align(64);
 
   // Pre-GFX10 target did not benefit from loop alignment
   if (!ML || DisableLoopAlignment ||
diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.h b/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
--- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
+++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
@@ -29,7 +29,7 @@
 ///
 /// @param Align alignment
 /// @param KnownBits Number of known low offset bits.
-inline unsigned UnknownPadding(llvm::Align Align, unsigned KnownBits) {
+inline unsigned UnknownPadding(Align Align, unsigned KnownBits) {
   if (KnownBits < Log2(Align))
     return Align.value() - (1ull << KnownBits);
   return 0;
@@ -67,7 +67,7 @@
 
   /// PostAlign - When > 1, the block terminator contains a .align
   /// directive, so the end of the block is aligned to PostAlign bytes.
-  llvm::Align PostAlign;
+  Align PostAlign;
 
   BasicBlockInfo() = default;
 
@@ -86,10 +86,10 @@
   /// Compute the offset immediately following this block. If Align is
   /// specified, return the offset the successor block will get if it has
   /// this alignment.
-  unsigned postOffset(llvm::Align Align = llvm::Align::None()) const {
+  unsigned postOffset(Align Alignment = Align::None()) const {
     unsigned PO = Offset + Size;
-    const llvm::Align PA = std::max(PostAlign, Align);
-    if (PA == llvm::Align::None())
+    const Align PA = std::max(PostAlign, Alignment);
+    if (PA == Align::None())
       return PO;
 
     // Add alignment padding from the terminator.
     return PO + UnknownPadding(PA, internalKnownBits());
   }
 
@@ -100,7 +100,7 @@
   /// instruction alignment. An aligned terminator may increase the number
   /// of known bits.
   /// If LogAlign is given, also consider the alignment of the next block.
-  unsigned postKnownBits(llvm::Align Align = llvm::Align::None()) const {
+  unsigned postKnownBits(Align Align = Align::None()) const {
     return std::max(Log2(std::max(PostAlign, Align)), internalKnownBits());
   }
 };
diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
--- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp
@@ -47,7 +47,7 @@
   BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
   BBI.Size = 0;
   BBI.Unalign = 0;
-  BBI.PostAlign = llvm::Align::None();
+  BBI.PostAlign = Align::None();
 
   for (MachineInstr &I : *MBB) {
     BBI.Size += TII->getInstSizeInBytes(I);
@@ -126,7 +126,7 @@
   for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) {
     // Get the offset and known bits at the end of the layout predecessor.
     // Include the alignment of the current block.
-    const llvm::Align Align = MF.getBlockNumbered(i)->getAlignment();
+    const Align Align = MF.getBlockNumbered(i)->getAlignment();
     const unsigned Offset = BBInfo[i - 1].postOffset(Align);
     const unsigned KnownBits = BBInfo[i - 1].postKnownBits(Align);
diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
--- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -247,7 +247,7 @@
     void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
     bool BBHasFallthrough(MachineBasicBlock *MBB);
     CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
-    llvm::Align getCPEAlign(const MachineInstr *CPEMI);
+    Align getCPEAlign(const MachineInstr *CPEMI);
     void scanFunctionJumpTables();
     void initializeFunctionInfo(const std::vector<MachineInstr *> &CPEMIs);
     MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
@@ -494,7 +494,7 @@
   MF->push_back(BB);
 
   // MachineConstantPool measures alignment in bytes.
-  const llvm::Align MaxAlign(MCP->getConstantPoolAlignment());
+  const Align MaxAlign(MCP->getConstantPoolAlignment());
   const unsigned MaxLogAlign = Log2(MaxAlign);
 
   // Mark the basic block as required by the const-pool.
@@ -650,7 +650,7 @@
 /// getCPEAlign - Returns the required alignment of the constant pool entry
 /// represented by CPEMI.
-llvm::Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
+Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
   switch (CPEMI->getOpcode()) {
   case ARM::CONSTPOOL_ENTRY:
     break;
@@ -1021,10 +1021,10 @@
                                       MachineBasicBlock* Water, CPUser &U,
                                       unsigned &Growth) {
   BBInfoVector &BBInfo = BBUtils->getBBInfo();
-  const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
+  const Align CPEAlign = getCPEAlign(U.CPEMI);
   const unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPEAlign);
   unsigned NextBlockOffset;
-  llvm::Align NextBlockAlignment;
+  Align NextBlockAlignment;
   MachineFunction::const_iterator NextBlock = Water->getIterator();
   if (++NextBlock == MF->end()) {
     NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
@@ -1214,7 +1214,7 @@
   // inserting islands between BB0 and BB1 makes other accesses out of range.
   MachineBasicBlock *UserBB = U.MI->getParent();
   BBInfoVector &BBInfo = BBUtils->getBBInfo();
-  const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
+  const Align CPEAlign = getCPEAlign(U.CPEMI);
   unsigned MinNoSplitDisp = BBInfo[UserBB->getNumber()].postOffset(CPEAlign);
   if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
     return false;
@@ -1268,7 +1268,7 @@
   CPUser &U = CPUsers[CPUserIndex];
   MachineInstr *UserMI = U.MI;
   MachineInstr *CPEMI = U.CPEMI;
-  const llvm::Align CPEAlign = getCPEAlign(CPEMI);
+  const Align CPEAlign = getCPEAlign(CPEMI);
   MachineBasicBlock *UserMBB = UserMI->getParent();
   BBInfoVector &BBInfo = BBUtils->getBBInfo();
   const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
@@ -1323,7 +1323,7 @@
   // Try to split the block so it's fully aligned. Compute the latest split
   // point where we can add a 4-byte branch instruction, and then align to
   // Align which is the largest possible alignment in the function.
-  const llvm::Align Align = MF->getAlignment();
+  const Align Align = MF->getAlignment();
   assert(Align >= CPEAlign && "Over-aligned constant pool entry");
   unsigned KnownBits = UserBBI.internalKnownBits();
   unsigned UPad = UnknownPadding(Align, KnownBits);
@@ -1501,7 +1501,7 @@
   // Always align the new block because CP entries can be smaller than 4
   // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
   // be an already aligned constant pool block.
-  const llvm::Align Align = isThumb ? llvm::Align(2) : llvm::Align(4);
+  const Align Align = isThumb ? llvm::Align(2) : llvm::Align(4);
   if (NewMBB->getAlignment() < Align)
     NewMBB->setAlignment(Align);
@@ -1566,7 +1566,7 @@
     BBInfo[CPEBB->getNumber()].Size = 0;
 
     // This block no longer needs to be aligned.
-    CPEBB->setAlignment(llvm::Align::None());
+    CPEBB->setAlignment(Align::None());
   } else {
     // Entries are sorted by descending alignment, so realign from the front.
     CPEBB->setAlignment(getCPEAlign(&*CPEBB->begin()));
diff --git a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
--- a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
@@ -105,7 +105,7 @@
   // offset of the current instruction from the start.
   unsigned InstOffset = 0;
   for (auto &B : MF) {
-    if (B.getAlignment() != llvm::Align::None()) {
+    if (B.getAlignment() != Align::None()) {
       // Although we don't know the exact layout of the final code, we need
       // to account for alignment padding somehow. This heuristic pads each
       // aligned basic block according to the alignment value.
diff --git a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
--- a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
@@ -114,7 +114,7 @@
   // First pass - compute the offset of each basic block.
   for (const MachineBasicBlock &MBB : MF) {
-    if (MBB.getAlignment() != llvm::Align::None()) {
+    if (MBB.getAlignment() != Align::None()) {
       // Although we don't know the exact layout of the final code, we need
       // to account for alignment padding somehow. This heuristic pads each
       // aligned basic block according to the alignment value.
diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
--- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
+++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h
@@ -15,7 +15,7 @@
 namespace llvm {
 
 // NaCl MIPS sandbox's instruction bundle size.
-static const llvm::Align MIPS_NACL_BUNDLE_ALIGN = llvm::Align(16);
+static const Align MIPS_NACL_BUNDLE_ALIGN = llvm::Align(16);
 
 bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
                                   bool *IsStore = nullptr);
diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
--- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -371,7 +371,7 @@
     void doInitialPlacement(std::vector<MachineInstr *> &CPEMIs);
     CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
-    llvm::Align getCPEAlign(const MachineInstr &CPEMI);
+    Align getCPEAlign(const MachineInstr &CPEMI);
     void initializeFunctionInfo(const std::vector<MachineInstr *> &CPEMIs);
     unsigned getOffsetOf(MachineInstr *MI) const;
     unsigned getUserOffset(CPUser&) const;
@@ -529,7 +529,7 @@
   MF->push_back(BB);
 
   // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
-  const llvm::Align MaxAlign(MCP->getConstantPoolAlignment());
+  const Align MaxAlign(MCP->getConstantPoolAlignment());
 
   // Mark the basic block as required by the const-pool.
   // If AlignConstantIslands isn't set, use 4-byte alignment for everything.
@@ -619,7 +619,7 @@
 /// getCPEAlign - Returns the required alignment of the constant pool entry
 /// represented by CPEMI. Alignment is measured in log2(bytes) units.
-llvm::Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) {
+Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) {
   assert(CPEMI.getOpcode() == Mips::CONSTPOOL_ENTRY);
 
   // Everything is 4-byte aligned unless AlignConstantIslands is set.
@@ -936,11 +936,11 @@
                                     unsigned &Growth) {
   unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset();
   unsigned NextBlockOffset;
-  llvm::Align NextBlockAlignment;
+  Align NextBlockAlignment;
   MachineFunction::const_iterator NextBlock = ++Water->getIterator();
   if (NextBlock == MF->end()) {
     NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
-    NextBlockAlignment = llvm::Align::None();
+    NextBlockAlignment = Align::None();
   } else {
     NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
     NextBlockAlignment = NextBlock->getAlignment();
@@ -1251,7 +1251,7 @@
   // Try to split the block so it's fully aligned. Compute the latest split
   // point where we can add a 4-byte branch instruction, and then align to
   // Align which is the largest possible alignment in the function.
-  const llvm::Align Align = MF->getAlignment();
+  const Align Align = MF->getAlignment();
   unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
   LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
                               BaseInsertOffset));
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -2148,7 +2148,7 @@
   EVT VT = Node->getValueType(0);
   SDValue Chain = Node->getOperand(0);
   SDValue VAListPtr = Node->getOperand(1);
-  const llvm::Align Align =
+  const Align Align =
       llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
   SDLoc DL(Node);
diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
--- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp
@@ -247,7 +247,7 @@
       Base = Addr.getOperand(0);
       // If base is a FI, additional offset calculation is done in
      // eliminateFrameIndex, otherwise we need to check the alignment
-      const llvm::Align Align(1ULL << ShiftAmount);
+      const Align Align(1ULL << ShiftAmount);
       if (!isAligned(Align, CN->getZExtValue()))
         return false;
     }
diff --git a/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp b/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
--- a/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -212,7 +212,7 @@
   // element size), otherwise it is a 16-bit signed immediate.
   unsigned OffsetBitSize =
       getLoadStoreOffsetSizeInBits(MI.getOpcode(), MI.getOperand(OpNo - 1));
-  const llvm::Align OffsetAlign(getLoadStoreOffsetAlign(MI.getOpcode()));
+  const Align OffsetAlign(getLoadStoreOffsetAlign(MI.getOpcode()));
   if (OffsetBitSize < 16 && isInt<16>(Offset) &&
       (!isIntN(OffsetBitSize, Offset) || !isAligned(OffsetAlign, Offset))) {
     // If we have an offset that needs to fit into a signed n-bit immediate
diff --git a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
--- a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -81,20 +81,20 @@
 /// original Offset.
 unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB,
                                          unsigned Offset) {
-  const llvm::Align Align = MBB.getAlignment();
-  if (Align == 1)
+  const Align Alignment = MBB.getAlignment();
+  if (Alignment == Align::None())
     return 0;
 
-  const llvm::Align ParentAlign = MBB.getParent()->getAlignment();
+  const Align ParentAlign = MBB.getParent()->getAlignment();
 
-  if (Align <= ParentAlign)
-    return offsetToAlignment(Offset, Align);
+  if (Alignment <= ParentAlign)
+    return offsetToAlignment(Offset, Alignment);
 
   // The alignment of this MBB is larger than the function's alignment, so we
   // can't tell whether or not it will insert nops. Assume that it will.
   if (FirstImpreciseBlock < 0)
     FirstImpreciseBlock = MBB.getNumber();
-  return Align.value() + offsetToAlignment(Offset, Align);
+  return Alignment.value() + offsetToAlignment(Offset, Alignment);
 }
 
 /// We need to be careful about the offset of the first block in the function
@@ -178,7 +178,7 @@
                              const MachineBasicBlock *Dest,
                              unsigned BrOffset) {
   int BranchSize;
-  llvm::Align MaxAlign = llvm::Align(4);
+  Align MaxAlign = llvm::Align(4);
   bool NeedExtraAdjustment = false;
   if (Dest->getNumber() <= Src->getNumber()) {
     // If this is a backwards branch, the delta is the offset from the
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -742,7 +742,7 @@
                                            const SelectionDAG &DAG,
                                            unsigned Depth = 0) const override;
 
-    llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override;
+    Align getPrefLoopAlignment(MachineLoop *ML) const override;
 
     bool shouldInsertFencesForAtomic(const Instruction *I) const override {
       return true;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -14110,7 +14110,7 @@
   }
 }
 
-llvm::Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
+Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
   switch (Subtarget.getDarwinDirective()) {
   default: break;
   case PPC::DIR_970:
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -198,7 +198,7 @@
   setBooleanContents(ZeroOrOneBooleanContent);
 
   // Function alignments.
-  const llvm::Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
+  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
   setMinFunctionAlignment(FunctionAlignment);
   setPrefFunctionAlignment(FunctionAlignment);
diff --git a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
--- a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -87,7 +87,7 @@
 
   // The minimum alignment of the block.
   // This value never changes.
-  llvm::Align Alignment;
+  Align Alignment;
 
   // The number of terminators in this block. This value never changes.
   unsigned NumTerminators = 0;
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -187,8 +187,8 @@
   bool canMacroFuseCmp();
   bool isLegalMaskedLoad(Type *DataType);
   bool isLegalMaskedStore(Type *DataType);
-  bool isLegalNTLoad(Type *DataType, llvm::Align Alignment);
-  bool isLegalNTStore(Type *DataType, llvm::Align Alignment);
+  bool isLegalNTLoad(Type *DataType, Align Alignment);
+  bool isLegalNTStore(Type *DataType, Align Alignment);
   bool isLegalMaskedGather(Type *DataType);
   bool isLegalMaskedScatter(Type *DataType);
   bool isLegalMaskedExpandLoad(Type *DataType);
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -3294,7 +3294,7 @@
   return isLegalMaskedLoad(DataType);
 }
 
-bool X86TTIImpl::isLegalNTLoad(Type *DataType, llvm::Align Alignment) {
+bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
   unsigned DataSize = DL.getTypeStoreSize(DataType);
 
   // The only supported nontemporal loads are for aligned vectors of 16 or 32
   // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
@@ -3305,7 +3305,7 @@
   return false;
 }
 
-bool X86TTIImpl::isLegalNTStore(Type *DataType, llvm::Align Alignment) {
+bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
   unsigned DataSize = DL.getTypeStoreSize(DataType);
 
   // SSE4A supports nontemporal stores of float and double at arbitrary
diff --git a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
--- a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
+++ b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp
@@ -115,7 +115,7 @@
   MCSymbol *GVSym = getSymbol(GV);
   const Constant *C = GV->getInitializer();
-  const llvm::Align Align(DL.getPrefTypeAlignment(C->getType()));
+  const Align Align(DL.getPrefTypeAlignment(C->getType()));
 
   // Mark the start of the global
   getTargetStreamer().emitCCTopData(GVSym->getName());
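
Reviewer note (not part of the patch): every hunk above exercises the same small API from llvm/Support/Alignment.h. The stand-alone sketch below is a minimal orientation aid, using only the operations that actually appear in this diff (Align, MaybeAlign, Align::None(), alignTo, offsetToAlignment, isAligned, Log2, assumeAligned, commonAlignment, valueOrOne). The helper name padByteCount is hypothetical, and the snippet assumes the 2019-era header shown above.

// Illustrative only. Assumes the llvm/Support/Alignment.h these hunks target.
#include "llvm/Support/Alignment.h"
#include <cassert>

using llvm::Align;
using llvm::MaybeAlign;

// Hypothetical helper mirroring the postOffset() logic in BranchRelaxation:
// how many pad bytes does a block ending at Offset need so that the next
// block starts at Alignment?
static uint64_t padByteCount(uint64_t Offset, Align Alignment) {
  if (Alignment == Align::None()) // one-byte alignment: never any padding
    return 0;
  return llvm::offsetToAlignment(Offset, Alignment); // alignTo(Offset) - Offset
}

int main() {
  const Align A(16); // alignments are powers of two, stored as log2 internally
  assert(llvm::isAligned(A, 32) && !llvm::isAligned(A, 24));
  assert(llvm::alignTo(13, A) == 16); // round up to the next multiple
  assert(llvm::Log2(A) == 4);         // the log2 view MachineBasicBlock::print uses
  assert(padByteCount(13, A) == 3);

  // MaybeAlign models "alignment may be unspecified" (e.g. no align attribute).
  const MaybeAlign MA;
  assert(MA.valueOrOne() == 1); // decays to 1, as the Mips va_arg hunk relies on

  // assumeAligned maps the legacy "0 means unaligned" convention to Align(1)...
  assert(llvm::assumeAligned(0) == Align::None());
  // ...and commonAlignment(A, Offset) is the largest alignment both satisfy.
  assert(llvm::commonAlignment(Align(8), 4) == Align(4));
  return 0;
}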