diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -580,9 +580,9 @@
   bool isLegalMaskedLoad(Type *DataType) const;
 
   /// Return true if the target supports nontemporal store.
-  bool isLegalNTStore(Type *DataType, llvm::Align Alignment) const;
+  bool isLegalNTStore(Type *DataType, Align Alignment) const;
   /// Return true if the target supports nontemporal load.
-  bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) const;
+  bool isLegalNTLoad(Type *DataType, Align Alignment) const;
 
   /// Return true if the target supports masked scatter.
   bool isLegalMaskedScatter(Type *DataType) const;
@@ -1196,8 +1196,8 @@
   virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
   virtual bool isLegalMaskedStore(Type *DataType) = 0;
   virtual bool isLegalMaskedLoad(Type *DataType) = 0;
-  virtual bool isLegalNTStore(Type *DataType, llvm::Align Alignment) = 0;
-  virtual bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) = 0;
+  virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
+  virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
   virtual bool isLegalMaskedScatter(Type *DataType) = 0;
   virtual bool isLegalMaskedGather(Type *DataType) = 0;
   virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
@@ -1471,10 +1471,10 @@
   bool isLegalMaskedLoad(Type *DataType) override {
     return Impl.isLegalMaskedLoad(DataType);
   }
-  bool isLegalNTStore(Type *DataType, llvm::Align Alignment) override {
+  bool isLegalNTStore(Type *DataType, Align Alignment) override {
     return Impl.isLegalNTStore(DataType, Alignment);
   }
-  bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) override {
+  bool isLegalNTLoad(Type *DataType, Align Alignment) override {
     return Impl.isLegalNTLoad(DataType, Alignment);
   }
   bool isLegalMaskedScatter(Type *DataType) override {
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -247,14 +247,14 @@
   bool isLegalMaskedLoad(Type *DataType) { return false; }
 
-  bool isLegalNTStore(Type *DataType, llvm::Align Alignment) {
+  bool isLegalNTStore(Type *DataType, Align Alignment) {
     // By default, assume nontemporal memory stores are available for stores
     // that are aligned and have a size that is a power of 2.
    unsigned DataSize = DL.getTypeStoreSize(DataType);
    return Alignment >= DataSize && isPowerOf2_32(DataSize);
  }
 
-  bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) {
+  bool isLegalNTLoad(Type *DataType, Align Alignment) {
     // By default, assume nontemporal memory loads are available for loads that
     // are aligned and have a size that is a power of 2.
     unsigned DataSize = DL.getTypeStoreSize(DataType);
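The default implementation above accepts a nontemporal access only when the type's store size is a power of two and the access is at least size-aligned. A minimal standalone sketch of that predicate (the helper name is illustrative, not part of the patch):

```cpp
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

// Mirrors TargetTransformInfoImpl::isLegalNTStore/isLegalNTLoad: the access
// must be at least as aligned as it is wide, and the width must be a power
// of two. Align compares against plain integers by its byte value.
static bool isNTAccessLegalByDefault(const DataLayout &DL, Type *Ty,
                                     Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(Ty);
  return Alignment >= DataSize && isPowerOf2_32(DataSize);
}
```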
diff --git a/llvm/include/llvm/CodeGen/AsmPrinter.h b/llvm/include/llvm/CodeGen/AsmPrinter.h
--- a/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -350,7 +350,7 @@
   /// global value is specified, and if that global has an explicit alignment
   /// requested, it will override the alignment request if required for
   /// correctness.
-  void EmitAlignment(llvm::Align Align, const GlobalObject *GV = nullptr) const;
+  void EmitAlignment(Align Alignment, const GlobalObject *GV = nullptr) const;
 
   /// Lower the specified LLVM Constant to an MCExpr.
   virtual const MCExpr *lowerConstant(const Constant *CV);
@@ -643,8 +643,8 @@
   void EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;
 
   /// Return the alignment for the specified \p GV.
-  static llvm::Align getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
-                                    llvm::Align InAlign = llvm::Align::None());
+  static Align getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
+                              Align InAlign = Align::None());
 
 private:
   /// Private state for PrintSpecial()
diff --git a/llvm/include/llvm/CodeGen/CallingConvLower.h b/llvm/include/llvm/CodeGen/CallingConvLower.h
--- a/llvm/include/llvm/CodeGen/CallingConvLower.h
+++ b/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -424,18 +424,18 @@
   /// AllocateStack - Allocate a chunk of stack space with the specified size
   /// and alignment.
   unsigned AllocateStack(unsigned Size, unsigned Alignment) {
-    const llvm::Align Align(Alignment);
-    StackOffset = alignTo(StackOffset, Align);
+    const Align CheckedAlignment(Alignment);
+    StackOffset = alignTo(StackOffset, CheckedAlignment);
     unsigned Result = StackOffset;
     StackOffset += Size;
-    MaxStackArgAlign = std::max(Align, MaxStackArgAlign);
-    ensureMaxAlignment(Align);
+    MaxStackArgAlign = std::max(CheckedAlignment, MaxStackArgAlign);
+    ensureMaxAlignment(CheckedAlignment);
     return Result;
   }
 
-  void ensureMaxAlignment(llvm::Align Align) {
+  void ensureMaxAlignment(Align Alignment) {
     if (!AnalyzingMustTailForwardedRegs)
-      MF.getFrameInfo().ensureMaxAlignment(Align.value());
+      MF.getFrameInfo().ensureMaxAlignment(Alignment.value());
   }
 
   /// Version of AllocateStack with extra register to be shadowed.
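AllocateStack above now funnels the raw integer through the Align constructor, which asserts the value is a nonzero power of two, before rounding the running offset. The same bump-and-align pattern in isolation (a sketch, not CCState itself):

```cpp
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Reserve Size bytes from an argument area: round the running offset up to
// Alignment, hand out that offset, then advance past the object.
static unsigned allocateFromArea(unsigned &AreaOffset, unsigned Size,
                                 Align Alignment) {
  AreaOffset = alignTo(AreaOffset, Alignment); // now a multiple of Alignment
  unsigned Result = AreaOffset;
  AreaOffset += Size;
  return Result;
}
```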
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -105,7 +105,7 @@
   /// Alignment of the basic block. One if the basic block does not need to be
   /// aligned.
-  llvm::Align Alignment;
+  Align Alignment;
 
   /// Indicate that this basic block is entered via an exception handler.
   bool IsEHPad = false;
@@ -373,10 +373,10 @@
   const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;
 
   /// Return alignment of the basic block.
-  llvm::Align getAlignment() const { return Alignment; }
+  Align getAlignment() const { return Alignment; }
 
   /// Set alignment of the basic block.
-  void setAlignment(llvm::Align A) { Alignment = A; }
+  void setAlignment(Align A) { Alignment = A; }
 
   /// Returns true if the block is a landing pad. That is this basic block is
   /// entered via an exception handler.
diff --git a/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
--- a/llvm/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -181,7 +181,7 @@
     uint8_t SSPLayout;
 
-    StackObject(uint64_t Size, llvm::Align Alignment, int64_t SPOffset,
+    StackObject(uint64_t Size, Align Alignment, int64_t SPOffset,
                 bool IsImmutable, bool IsSpillSlot, const AllocaInst *Alloca,
                 bool IsAliased, uint8_t StackID = 0)
         : SPOffset(SPOffset), Size(Size), Alignment(Alignment),
@@ -419,7 +419,9 @@
   /// Required alignment of the local object blob,
   /// which is the strictest alignment of any object in it.
-  void setLocalFrameMaxAlign(Align Align) { LocalFrameMaxAlign = Align; }
+  void setLocalFrameMaxAlign(Align Alignment) {
+    LocalFrameMaxAlign = Alignment;
+  }
 
   /// Return the required alignment of the local object blob.
   Align getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }
@@ -564,7 +566,7 @@
   unsigned getMaxAlignment() const { return MaxAlignment.value(); }
 
   /// Make sure the function is at least Align bytes aligned.
-  void ensureMaxAlignment(llvm::Align Align);
+  void ensureMaxAlignment(Align Alignment);
   /// FIXME: Remove this once transition to Align is over.
   inline void ensureMaxAlignment(unsigned Align) {
     ensureMaxAlignment(assumeAligned(Align));
@@ -732,9 +734,9 @@
   /// Create a new statically sized stack object, returning
   /// a nonnegative identifier to represent it.
-  int CreateStackObject(uint64_t Size, llvm::Align Alignment, bool isSpillSlot,
+  int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot,
                         const AllocaInst *Alloca = nullptr, uint8_t ID = 0);
-  /// FIXME: Remove this function when transition to llvm::Align is over.
+  /// FIXME: Remove this function when transition to Align is over.
   inline int CreateStackObject(uint64_t Size, unsigned Alignment,
                                bool isSpillSlot,
                                const AllocaInst *Alloca = nullptr,
@@ -745,8 +747,8 @@
   /// Create a new statically sized stack object that represents a spill slot,
   /// returning a nonnegative identifier to represent it.
-  int CreateSpillStackObject(uint64_t Size, llvm::Align Alignment);
-  /// FIXME: Remove this function when transition to llvm::Align is over.
+  int CreateSpillStackObject(uint64_t Size, Align Alignment);
+  /// FIXME: Remove this function when transition to Align is over.
   inline int CreateSpillStackObject(uint64_t Size, unsigned Alignment) {
     return CreateSpillStackObject(Size, assumeAligned(Alignment));
   }
@@ -760,9 +762,8 @@
   /// Notify the MachineFrameInfo object that a variable sized object has been
   /// created. This must be created whenever a variable sized object is
   /// created, whether or not the index returned is actually used.
-  int CreateVariableSizedObject(llvm::Align Alignment,
-                                const AllocaInst *Alloca);
-  /// FIXME: Remove this function when transition to llvm::Align is over.
+  int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca);
+  /// FIXME: Remove this function when transition to Align is over.
   int CreateVariableSizedObject(unsigned Alignment, const AllocaInst *Alloca) {
     return CreateVariableSizedObject(assumeAligned(Alignment), Alloca);
   }
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -277,7 +277,7 @@
   unsigned FunctionNumber;
 
   /// Alignment - The alignment of the function.
-  llvm::Align Alignment;
+  Align Alignment;
 
   /// ExposesReturnsTwice - True if the function calls setjmp or related
   /// functions with attribute "returns twice", but doesn't have
@@ -509,13 +509,13 @@
   WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
 
   /// getAlignment - Return the alignment of the function.
-  llvm::Align getAlignment() const { return Alignment; }
+  Align getAlignment() const { return Alignment; }
 
   /// setAlignment - Set the alignment of the function.
-  void setAlignment(llvm::Align A) { Alignment = A; }
+  void setAlignment(Align A) { Alignment = A; }
 
   /// ensureAlignment - Make sure the function is at least A bytes aligned.
-  void ensureAlignment(llvm::Align A) {
+  void ensureAlignment(Align A) {
     if (Alignment < A)
       Alignment = A;
   }
diff --git a/llvm/include/llvm/CodeGen/TargetCallingConv.h b/llvm/include/llvm/CodeGen/TargetCallingConv.h
--- a/llvm/include/llvm/CodeGen/TargetCallingConv.h
+++ b/llvm/include/llvm/CodeGen/TargetCallingConv.h
@@ -126,7 +126,7 @@
     return A ? A->value() : 0;
   }
   void setByValAlign(unsigned A) {
-    ByValAlign = encode(llvm::Align(A));
+    ByValAlign = encode(Align(A));
     assert(getByValAlign() == A && "bitfield overflow");
   }
 
@@ -135,7 +135,7 @@
     return A ? A->value() : 0;
   }
   void setOrigAlign(unsigned A) {
-    OrigAlign = encode(llvm::Align(A));
+    OrigAlign = encode(Align(A));
     assert(getOrigAlign() == A && "bitfield overflow");
   }
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1597,18 +1597,18 @@
   }
 
   /// Return the minimum stack alignment of an argument.
-  llvm::Align getMinStackArgumentAlignment() const {
+  Align getMinStackArgumentAlignment() const {
     return MinStackArgumentAlignment;
   }
 
   /// Return the minimum function alignment.
-  llvm::Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
+  Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
 
   /// Return the preferred function alignment.
-  llvm::Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
+  Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
 
   /// Return the preferred loop alignment.
-  virtual llvm::Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
+  virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
     return PrefLoopAlignment;
   }
 
@@ -2120,24 +2120,24 @@
   }
 
   /// Set the target's minimum function alignment.
-  void setMinFunctionAlignment(llvm::Align Align) {
-    MinFunctionAlignment = Align;
+  void setMinFunctionAlignment(Align Alignment) {
+    MinFunctionAlignment = Alignment;
   }
 
   /// Set the target's preferred function alignment. This should be set if
   /// there is a performance benefit to higher-than-minimum alignment
-  void setPrefFunctionAlignment(llvm::Align Align) {
-    PrefFunctionAlignment = Align;
+  void setPrefFunctionAlignment(Align Alignment) {
+    PrefFunctionAlignment = Alignment;
   }
 
   /// Set the target's preferred loop alignment. Default alignment is one, it
   /// means the target does not care about loop alignment. The target may also
   /// override getPrefLoopAlignment to provide per-loop values.
-  void setPrefLoopAlignment(llvm::Align Align) { PrefLoopAlignment = Align; }
+  void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
 
   /// Set the minimum stack alignment of an argument.
-  void setMinStackArgumentAlignment(llvm::Align Align) {
-    MinStackArgumentAlignment = Align;
+  void setMinStackArgumentAlignment(Align Alignment) {
+    MinStackArgumentAlignment = Alignment;
   }
 
   /// Set the maximum atomic operation size supported by the
@@ -2699,18 +2699,18 @@
   Sched::Preference SchedPreferenceInfo;
 
   /// The minimum alignment that any argument on the stack needs to have.
-  llvm::Align MinStackArgumentAlignment;
+  Align MinStackArgumentAlignment;
 
   /// The minimum function alignment (used when optimizing for size, and to
   /// prevent explicitly provided alignment from leading to incorrect code).
-  llvm::Align MinFunctionAlignment;
+  Align MinFunctionAlignment;
 
   /// The preferred function alignment (used when alignment unspecified and
   /// optimizing for speed).
-  llvm::Align PrefFunctionAlignment;
+  Align PrefFunctionAlignment;
 
   /// The preferred loop alignment (in log2, not in bytes).
-  llvm::Align PrefLoopAlignment;
+  Align PrefLoopAlignment;
 
   /// Size in bits of the maximum atomics size the backend supports.
   /// Accesses larger than this will be expanded by AtomicExpandPass.
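With the setters above taking Align, a backend states these limits in bytes while the wrapper stores the log2 internally (the stale "log2" wording in the PrefLoopAlignment comment predates this patch). A hypothetical target constructor, purely illustrative — MyTargetLowering is a made-up backend:

```cpp
// Align(N) asserts N is a power of two and stores log2(N) internally,
// so all of these are byte values at the call site.
MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
    : TargetLowering(TM) {
  setMinFunctionAlignment(Align(4));      // correctness floor
  setPrefFunctionAlignment(Align(16));    // performance preference
  setPrefLoopAlignment(Align(16));        // pad hot loop headers
  setMinStackArgumentAlignment(Align(8)); // stack argument floor
}
```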
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -72,11 +72,11 @@
   /// Alignment type from \c AlignTypeEnum
   unsigned AlignType : 8;
   unsigned TypeBitWidth : 24;
-  llvm::Align ABIAlign;
-  llvm::Align PrefAlign;
+  Align ABIAlign;
+  Align PrefAlign;
 
-  static LayoutAlignElem get(AlignTypeEnum align_type, llvm::Align abi_align,
-                             llvm::Align pref_align, uint32_t bit_width);
+  static LayoutAlignElem get(AlignTypeEnum align_type, Align abi_align,
+                             Align pref_align, uint32_t bit_width);
 
   bool operator==(const LayoutAlignElem &rhs) const;
 };
@@ -88,15 +88,15 @@
 /// \note The unusual order of elements in the structure attempts to reduce
 /// padding and make the structure slightly more cache friendly.
 struct PointerAlignElem {
-  llvm::Align ABIAlign;
-  llvm::Align PrefAlign;
+  Align ABIAlign;
+  Align PrefAlign;
   uint32_t TypeByteWidth;
   uint32_t AddressSpace;
   uint32_t IndexWidth;
 
   /// Initializer
-  static PointerAlignElem get(uint32_t AddressSpace, llvm::Align ABIAlign,
-                              llvm::Align PrefAlign, uint32_t TypeByteWidth,
+  static PointerAlignElem get(uint32_t AddressSpace, Align ABIAlign,
+                              Align PrefAlign, uint32_t TypeByteWidth,
                               uint32_t IndexWidth);
 
   bool operator==(const PointerAlignElem &rhs) const;
@@ -173,16 +173,15 @@
   /// well-defined bitwise representation.
   SmallVector<unsigned, 8> NonIntegralAddressSpaces;
 
-  void setAlignment(AlignTypeEnum align_type, llvm::Align abi_align,
-                    llvm::Align pref_align, uint32_t bit_width);
-  llvm::Align getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
-                               bool ABIAlign, Type *Ty) const;
-  void setPointerAlignment(uint32_t AddrSpace, llvm::Align ABIAlign,
-                           llvm::Align PrefAlign, uint32_t TypeByteWidth,
-                           uint32_t IndexWidth);
+  void setAlignment(AlignTypeEnum align_type, Align abi_align, Align pref_align,
+                    uint32_t bit_width);
+  Align getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
+                         bool ABIAlign, Type *Ty) const;
+  void setPointerAlignment(uint32_t AddrSpace, Align ABIAlign, Align PrefAlign,
+                           uint32_t TypeByteWidth, uint32_t IndexWidth);
 
   /// Internal helper method that returns requested alignment for type.
-  llvm::Align getAlignment(Type *Ty, bool abi_or_pref) const;
+  Align getAlignment(Type *Ty, bool abi_or_pref) const;
 
   /// Parses a target data specification string. Assert if the string is
   /// malformed.
@@ -262,11 +261,11 @@
   bool isIllegalInteger(uint64_t Width) const { return !isLegalInteger(Width); }
 
   /// Returns true if the given alignment exceeds the natural stack alignment.
-  bool exceedsNaturalStackAlignment(llvm::Align Align) const {
-    return StackNaturalAlign && (Align > StackNaturalAlign);
+  bool exceedsNaturalStackAlignment(Align Alignment) const {
+    return StackNaturalAlign && (Alignment > StackNaturalAlign);
   }
 
-  llvm::Align getStackAlignment() const {
+  Align getStackAlignment() const {
     assert(StackNaturalAlign && "StackNaturalAlign must be defined");
     return *StackNaturalAlign;
   }
@@ -349,12 +348,12 @@
   }
 
   /// Layout pointer alignment
-  llvm::Align getPointerABIAlignment(unsigned AS) const;
+  Align getPointerABIAlignment(unsigned AS) const;
 
   /// Return target's alignment for stack-based pointers
   /// FIXME: The defaults need to be removed once all of
   /// the backends/clients are updated.
-  llvm::Align getPointerPrefAlignment(unsigned AS = 0) const;
+  Align getPointerPrefAlignment(unsigned AS = 0) const;
 
   /// Layout pointer size
   /// FIXME: The defaults need to be removed once all of
@@ -490,7 +489,7 @@
   /// Returns the minimum ABI-required alignment for an integer type of
   /// the specified bitwidth.
-  llvm::Align getABIIntegerTypeAlignment(unsigned BitWidth) const;
+  Align getABIIntegerTypeAlignment(unsigned BitWidth) const;
 
   /// Returns the preferred stack/global alignment for the specified
   /// type.
@@ -562,7 +561,7 @@
 /// based on the DataLayout structure.
 class StructLayout {
   uint64_t StructSize;
-  llvm::Align StructAlignment;
+  Align StructAlignment;
   unsigned IsPadded : 1;
   unsigned NumElements : 31;
   uint64_t MemberOffsets[1]; // variable sized array!
@@ -572,7 +571,7 @@
 
   uint64_t getSizeInBits() const { return 8 * StructSize; }
 
-  llvm::Align getAlignment() const { return StructAlignment; }
+  Align getAlignment() const { return StructAlignment; }
 
   /// Returns whether the struct has padding or not between its fields.
   /// NB: Padding in nested element is not taken into account.
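exceedsNaturalStackAlignment and getStackAlignment above combine into the clamping policy the CodeGen changes below apply to stack objects. A condensed sketch of that policy (the helper name is illustrative):

```cpp
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Clamp a requested alignment to the target's natural stack alignment,
// mirroring what clampStackAlignment does in MachineFrameInfo.cpp below.
static Align clampToNaturalStack(const DataLayout &DL, Align Requested) {
  if (!DL.exceedsNaturalStackAlignment(Requested))
    return Requested; // either no natural alignment is set, or we fit in it
  return DL.getStackAlignment(); // safe: the natural alignment is defined
}
```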
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -114,9 +114,9 @@
       return MA->value();
     return 0;
   }
-  // FIXME: Remove once migration to llvm::Align is over.
+  // FIXME: Remove once migration to Align is over.
   void setAlignment(unsigned Align);
-  void setAlignment(llvm::MaybeAlign Align);
+  void setAlignment(MaybeAlign Align);
 
   /// Return true if this alloca is in the entry block of the function and is a
   /// constant size. If so, the code generator will fold it into the
@@ -248,9 +248,9 @@
     return 0;
   }
 
-  // FIXME: Remove once migration to llvm::Align is over.
+  // FIXME: Remove once migration to Align is over.
   void setAlignment(unsigned Align);
-  void setAlignment(llvm::MaybeAlign Align);
+  void setAlignment(MaybeAlign Align);
 
   /// Returns the ordering constraint of this load instruction.
   AtomicOrdering getOrdering() const {
@@ -378,9 +378,9 @@
     return 0;
   }
 
-  // FIXME: Remove once migration to llvm::Align is over.
+  // FIXME: Remove once migration to Align is over.
   void setAlignment(unsigned Align);
-  void setAlignment(llvm::MaybeAlign Align);
+  void setAlignment(MaybeAlign Align);
 
   /// Returns the ordering constraint of this store instruction.
   AtomicOrdering getOrdering() const {
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -59,7 +59,7 @@
   MCSymbol *Begin;
   MCSymbol *End = nullptr;
   /// The alignment requirement of this section.
-  llvm::Align Alignment;
+  Align Alignment;
   /// The section index in the assemblers section list.
   unsigned Ordinal = 0;
   /// The index of this section in the layout order.
@@ -119,7 +119,7 @@
   bool hasEnded() const;
 
   unsigned getAlignment() const { return Alignment.value(); }
-  void setAlignment(llvm::Align Value) { Alignment = Value; }
+  void setAlignment(Align Value) { Alignment = Value; }
 
   unsigned getOrdinal() const { return Ordinal; }
   void setOrdinal(unsigned Value) { Ordinal = Value; }
diff --git a/llvm/include/llvm/Support/Alignment.h b/llvm/include/llvm/Support/Alignment.h
--- a/llvm/include/llvm/Support/Alignment.h
+++ b/llvm/include/llvm/Support/Alignment.h
@@ -76,10 +76,10 @@
   /// Returns a default constructed Align which corresponds to no alignment.
   /// This is useful to test for unalignment as it conveys clear semantic.
-  /// `if (A != llvm::Align::None())`
+  /// `if (A != Align::None())`
   /// would be better than
-  /// `if (A > llvm::Align(1))`
-  constexpr static const Align None() { return llvm::Align(); }
+  /// `if (A > Align(1))`
+  constexpr static const Align None() { return Align(); }
 };
 
 /// Treats the value 0 as a 1, so Align is always at least 1.
@@ -142,8 +142,8 @@
 /// Returns the offset to the next integer (mod 2**64) that is greater than
 /// or equal to \p Value and is a multiple of \p Align.
-inline uint64_t offsetToAlignment(uint64_t Value, llvm::Align Align) {
-  return alignTo(Value, Align) - Value;
+inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
+  return alignTo(Value, Alignment) - Value;
 }
 
 /// Returns the log2 of the alignment.
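A few worked values for the Alignment.h helpers above, following the semantics documented in that header:

```cpp
#include "llvm/Support/Alignment.h"
#include <cassert>

using namespace llvm;

int main() {
  Align A(16);                 // ctor asserts the value is a power of two
  assert(A.value() == 16);     // byte value
  assert(Log2(A) == 4);        // exponent form
  assert(A != Align::None());  // Align::None() is the trivial 1-byte case
  assert(alignTo(13, A) == 16);          // next 16-byte boundary
  assert(offsetToAlignment(13, A) == 3); // padding needed to reach it
  assert(offsetToAlignment(32, A) == 0); // already aligned
}
```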
diff --git a/llvm/include/llvm/Support/OnDiskHashTable.h b/llvm/include/llvm/Support/OnDiskHashTable.h
--- a/llvm/include/llvm/Support/OnDiskHashTable.h
+++ b/llvm/include/llvm/Support/OnDiskHashTable.h
@@ -208,8 +208,7 @@
     // Pad with zeros so that we can start the hashtable at an aligned address.
     offset_type TableOff = Out.tell();
-    uint64_t N =
-        llvm::offsetToAlignment(TableOff, llvm::Align(alignof(offset_type)));
+    uint64_t N = offsetToAlignment(TableOff, Align(alignof(offset_type)));
     TableOff += N;
     while (N--)
       LE.write<uint8_t>(0);
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -560,9 +560,9 @@
 STATISTIC(ObjectVisitorLoad,
           "Number of load instructions with unsolved size and offset");
 
-APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
-  if (Options.RoundToAlign && Align)
-    return APInt(IntTyBits, alignTo(Size.getZExtValue(), llvm::Align(Align)));
+APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Alignment) {
+  if (Options.RoundToAlign && Alignment)
+    return APInt(IntTyBits, alignTo(Size.getZExtValue(), Align(Alignment)));
   return Size;
 }
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -302,12 +302,11 @@
 }
 
 bool TargetTransformInfo::isLegalNTStore(Type *DataType,
-                                         llvm::Align Alignment) const {
+                                         Align Alignment) const {
   return TTIImpl->isLegalNTStore(DataType, Alignment);
 }
 
-bool TargetTransformInfo::isLegalNTLoad(Type *DataType,
-                                        llvm::Align Alignment) const {
+bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const {
   return TTIImpl->isLegalNTLoad(DataType, Alignment);
 }
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -163,29 +163,28 @@
 /// getGVAlignment - Return the alignment to use for the specified global
 /// value. This rounds up to the preferred alignment if possible and legal.
-llvm::Align AsmPrinter::getGVAlignment(const GlobalValue *GV,
-                                       const DataLayout &DL,
-                                       llvm::Align InAlign) {
-  llvm::Align Align;
+Align AsmPrinter::getGVAlignment(const GlobalValue *GV, const DataLayout &DL,
+                                 Align InAlign) {
+  Align Alignment;
   if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
-    Align = llvm::Align(DL.getPreferredAlignment(GVar));
+    Alignment = Align(DL.getPreferredAlignment(GVar));
 
   // If InAlign is specified, round it to it.
-  if (InAlign > Align)
-    Align = InAlign;
+  if (InAlign > Alignment)
+    Alignment = InAlign;
 
   // If the GV has a specified alignment, take it into account.
-  const llvm::MaybeAlign GVAlign(GV->getAlignment());
+  const MaybeAlign GVAlign(GV->getAlignment());
   if (!GVAlign)
-    return Align;
+    return Alignment;
 
   assert(GVAlign && "GVAlign must be set");
 
   // If the GVAlign is larger than NumBits, or if we are required to obey
   // NumBits because the GV has an assigned section, obey it.
-  if (*GVAlign > Align || GV->hasSection())
-    Align = *GVAlign;
-  return Align;
+  if (*GVAlign > Alignment || GV->hasSection())
+    Alignment = *GVAlign;
+  return Alignment;
 }
 
 AsmPrinter::AsmPrinter(TargetMachine &tm, std::unique_ptr<MCStreamer> Streamer)
@@ -507,7 +506,7 @@
   // If the alignment is specified, we *must* obey it. Overaligning a global
   // with a specified alignment is a prompt way to break globals emitted to
   // sections and expected to be contiguous (e.g. ObjC metadata).
-  const llvm::Align Align = getGVAlignment(GV, DL);
+  const Align Alignment = getGVAlignment(GV, DL);
 
   for (const HandlerInfo &HI : Handlers) {
     NamedRegionTimer T(HI.TimerName, HI.TimerDescription,
@@ -523,7 +522,7 @@
     const bool SupportsAlignment =
         getObjFileLowering().getCommDirectiveSupportsAlignment();
     OutStreamer->EmitCommonSymbol(GVSym, Size,
-                                  SupportsAlignment ? Align.value() : 0);
+                                  SupportsAlignment ? Alignment.value() : 0);
     return;
   }
 
@@ -538,7 +537,7 @@
       Size = 1; // zerofill of 0 bytes is undefined.
     EmitLinkage(GV, GVSym);
     // .zerofill __DATA, __bss, _foo, 400, 5
-    OutStreamer->EmitZerofill(TheSection, GVSym, Size, Align.value());
+    OutStreamer->EmitZerofill(TheSection, GVSym, Size, Alignment.value());
     return;
   }
 
@@ -557,7 +556,7 @@
     // Prefer to simply fall back to .local / .comm in this case.
     if (MAI->getLCOMMDirectiveAlignmentType() != LCOMM::NoAlignment) {
       // .lcomm _foo, 42
-      OutStreamer->EmitLocalCommonSymbol(GVSym, Size, Align.value());
+      OutStreamer->EmitLocalCommonSymbol(GVSym, Size, Alignment.value());
       return;
     }
 
@@ -567,7 +566,7 @@
     const bool SupportsAlignment =
         getObjFileLowering().getCommDirectiveSupportsAlignment();
     OutStreamer->EmitCommonSymbol(GVSym, Size,
-                                  SupportsAlignment ? Align.value() : 0);
+                                  SupportsAlignment ? Alignment.value() : 0);
     return;
   }
 
@@ -588,11 +587,11 @@
 
     if (GVKind.isThreadBSS()) {
       TheSection = getObjFileLowering().getTLSBSSSection();
-      OutStreamer->EmitTBSSSymbol(TheSection, MangSym, Size, Align.value());
+      OutStreamer->EmitTBSSSymbol(TheSection, MangSym, Size, Alignment.value());
     } else if (GVKind.isThreadData()) {
       OutStreamer->SwitchSection(TheSection);
 
-      EmitAlignment(Align, GV);
+      EmitAlignment(Alignment, GV);
       OutStreamer->EmitLabel(MangSym);
 
       EmitGlobalConstant(GV->getParent()->getDataLayout(),
@@ -628,7 +627,7 @@
   OutStreamer->SwitchSection(TheSection);
 
   EmitLinkage(GV, EmittedInitSym);
-  EmitAlignment(Align, GV);
+  EmitAlignment(Alignment, GV);
 
   OutStreamer->EmitLabel(EmittedInitSym);
 
@@ -1435,7 +1434,7 @@
     OutStreamer->SwitchSection(TLOF.getDataSection());
     const DataLayout &DL = M.getDataLayout();
 
-    EmitAlignment(llvm::Align(DL.getPointerSize()));
+    EmitAlignment(Align(DL.getPointerSize()));
     for (const auto &Stub : Stubs) {
       OutStreamer->EmitLabel(Stub.first);
       OutStreamer->EmitSymbolValue(Stub.second.getPointer(),
@@ -1462,7 +1461,7 @@
               COFF::IMAGE_SCN_LNK_COMDAT,
           SectionKind::getReadOnly(), Stub.first->getName(),
           COFF::IMAGE_COMDAT_SELECT_ANY));
-      EmitAlignment(llvm::Align(DL.getPointerSize()));
+      EmitAlignment(Align(DL.getPointerSize()));
       OutStreamer->EmitSymbolAttribute(Stub.first, MCSA_Global);
       OutStreamer->EmitLabel(Stub.first);
       OutStreamer->EmitSymbolValue(Stub.second.getPointer(),
@@ -1763,7 +1762,7 @@
     if (CurSection != CPSections[i].S) {
       OutStreamer->SwitchSection(CPSections[i].S);
-      EmitAlignment(llvm::Align(CPSections[i].Alignment));
+      EmitAlignment(Align(CPSections[i].Alignment));
       CurSection = CPSections[i].S;
       Offset = 0;
     }
@@ -1810,7 +1809,7 @@
     OutStreamer->SwitchSection(ReadOnlySection);
   }
 
-  EmitAlignment(llvm::Align(MJTI->getEntryAlignment(DL)));
+  EmitAlignment(Align(MJTI->getEntryAlignment(DL)));
 
   // Jump tables in code sections are marked with a data_region directive
   // where that's supported.
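getGVAlignment above merges three inputs in a fixed priority: the DataLayout preference, the caller's floor (InAlign), and finally any explicit alignment on the global, which also wins whenever the global has an assigned section. Restated as a condensed sketch (not the actual helper):

```cpp
#include "llvm/Support/Alignment.h"
#include <algorithm>

using namespace llvm;

// Same decision order as AsmPrinter::getGVAlignment after this patch.
static Align resolveGVAlign(Align Preferred, Align Floor, MaybeAlign Explicit,
                            bool HasSection) {
  Align Result = std::max(Preferred, Floor);
  // An explicit alignment is obeyed when larger, or always for sectioned GVs.
  if (Explicit && (*Explicit > Result || HasSection))
    Result = *Explicit;
  return Result;
}
```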
@@ -2026,7 +2025,7 @@
   llvm::stable_sort(Structors, [](const Structor &L, const Structor &R) {
     return L.Priority < R.Priority;
   });
-  const llvm::Align Align = DL.getPointerPrefAlignment();
+  const Align Align = DL.getPointerPrefAlignment();
   for (Structor &S : Structors) {
     const TargetLoweringObjectFile &Obj = getObjFileLowering();
     const MCSymbol *KeySym = nullptr;
@@ -2150,18 +2149,17 @@
 // two boundary.  If a global value is specified, and if that global has
 // an explicit alignment requested, it will override the alignment request
 // if required for correctness.
-void AsmPrinter::EmitAlignment(llvm::Align Align,
-                               const GlobalObject *GV) const {
+void AsmPrinter::EmitAlignment(Align Alignment, const GlobalObject *GV) const {
   if (GV)
-    Align = getGVAlignment(GV, GV->getParent()->getDataLayout(), Align);
+    Alignment = getGVAlignment(GV, GV->getParent()->getDataLayout(), Alignment);
 
-  if (Align == 1)
+  if (Alignment == Align::None())
     return; // 1-byte aligned: no need to emit alignment.
 
   if (getCurrentSection()->getKind().isText())
-    OutStreamer->EmitCodeAlignment(Align.value());
+    OutStreamer->EmitCodeAlignment(Alignment.value());
   else
-    OutStreamer->EmitValueToAlignment(Align.value());
+    OutStreamer->EmitValueToAlignment(Alignment.value());
 }
 
 //===----------------------------------------------------------------------===//
@@ -2936,9 +2934,9 @@
   }
 
   // Emit an alignment directive for this block, if needed.
-  const llvm::Align Align = MBB.getAlignment();
-  if (Align != llvm::Align::None())
-    EmitAlignment(Align);
+  const Align Alignment = MBB.getAlignment();
+  if (Alignment != Align::None())
+    EmitAlignment(Alignment);
   MCCodePaddingContext Context;
   setupCodePaddingContext(MBB, Context);
   OutStreamer->EmitCodePaddingBasicBlockStart(Context);
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -2509,8 +2509,8 @@
   unsigned TupleSize = PtrSize * 2;
 
   // 7.20 in the Dwarf specs requires the table to be aligned to a tuple.
-  unsigned Padding = offsetToAlignment(sizeof(int32_t) + ContentSize,
-                                       llvm::Align(TupleSize));
+  unsigned Padding =
+      offsetToAlignment(sizeof(int32_t) + ContentSize, Align(TupleSize));
 
   ContentSize += Padding;
   ContentSize += (List.size() + 1) * TupleSize;
diff --git a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp
@@ -426,7 +426,7 @@
   // EHABI). In this case LSDASection will be NULL.
   if (LSDASection)
     Asm->OutStreamer->SwitchSection(LSDASection);
-  Asm->EmitAlignment(llvm::Align(4));
+  Asm->EmitAlignment(Align(4));
 
   // Emit the LSDA.
   MCSymbol *GCCETSym =
@@ -602,11 +602,11 @@
   }
 
   if (HaveTTData) {
-    Asm->EmitAlignment(llvm::Align(4));
+    Asm->EmitAlignment(Align(4));
     emitTypeInfos(TTypeEncoding, TTBaseLabel);
   }
 
-  Asm->EmitAlignment(llvm::Align(4));
+  Asm->EmitAlignment(Align(4));
   return GCCETSym;
 }
diff --git a/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/ErlangGCPrinter.cpp
@@ -72,7 +72,7 @@
    **/
 
   // Align to address width.
-  AP.EmitAlignment(IntPtrSize == 4 ? llvm::Align(4) : llvm::Align(8));
+  AP.EmitAlignment(IntPtrSize == 4 ? Align(4) : Align(8));
 
   // Emit PointCount.
   OS.AddComment("safe point count");
diff --git a/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
@@ -129,7 +129,7 @@
     report_fatal_error(" Too much descriptor for ocaml GC");
   }
   AP.emitInt16(NumDescriptors);
-  AP.EmitAlignment(IntPtrSize == 4 ? llvm::Align(4) : llvm::Align(8));
+  AP.EmitAlignment(IntPtrSize == 4 ? Align(4) : Align(8));
 
   for (GCModuleInfo::FuncInfoVec::iterator I = Info.funcinfo_begin(),
                                            IE = Info.funcinfo_end();
@@ -180,7 +180,7 @@
         AP.emitInt16(K->StackOffset);
       }
 
-      AP.EmitAlignment(IntPtrSize == 4 ? llvm::Align(4) : llvm::Align(8));
+      AP.EmitAlignment(IntPtrSize == 4 ? Align(4) : Align(8));
     }
   }
 }
diff --git a/llvm/lib/CodeGen/BranchRelaxation.cpp b/llvm/lib/CodeGen/BranchRelaxation.cpp
--- a/llvm/lib/CodeGen/BranchRelaxation.cpp
+++ b/llvm/lib/CodeGen/BranchRelaxation.cpp
@@ -65,17 +65,17 @@
     /// block.
     unsigned postOffset(const MachineBasicBlock &MBB) const {
       const unsigned PO = Offset + Size;
-      const llvm::Align Align = MBB.getAlignment();
-      if (Align == 1)
+      const Align Alignment = MBB.getAlignment();
+      if (Alignment == 1)
         return PO;
 
-      const llvm::Align ParentAlign = MBB.getParent()->getAlignment();
-      if (Align <= ParentAlign)
-        return PO + offsetToAlignment(PO, Align);
+      const Align ParentAlign = MBB.getParent()->getAlignment();
+      if (Alignment <= ParentAlign)
+        return PO + offsetToAlignment(PO, Alignment);
 
       // The alignment of this MBB is larger than the function's alignment, so we
       // can't tell whether or not it will insert nops. Assume that it will.
-      return PO + Align.value() + offsetToAlignment(PO, Align);
+      return PO + Alignment.value() + offsetToAlignment(PO, Alignment);
     }
   };
diff --git a/llvm/lib/CodeGen/CallingConvLower.cpp b/llvm/lib/CodeGen/CallingConvLower.cpp
--- a/llvm/lib/CodeGen/CallingConvLower.cpp
+++ b/llvm/lib/CodeGen/CallingConvLower.cpp
@@ -43,17 +43,18 @@
 void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo, int MinSize,
                           int MinAlignment, ISD::ArgFlagsTy ArgFlags) {
-  llvm::Align MinAlign(MinAlignment);
-  llvm::Align Align(ArgFlags.getByValAlign());
+  Align MinAlign(MinAlignment);
+  Align Alignment(ArgFlags.getByValAlign());
   unsigned Size = ArgFlags.getByValSize();
   if (MinSize > (int)Size)
     Size = MinSize;
-  if (MinAlign > Align)
-    Align = MinAlign;
-  ensureMaxAlignment(Align);
-  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align.value());
+  if (MinAlign > Alignment)
+    Alignment = MinAlign;
+  ensureMaxAlignment(Alignment);
+  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size,
+                                                     Alignment.value());
   Size = unsigned(alignTo(Size, MinAlign));
-  unsigned Offset = AllocateStack(Size, Align.value());
+  unsigned Offset = AllocateStack(Size, Alignment.value());
   addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
 }
 
@@ -198,7 +199,7 @@
 void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                           MVT VT, CCAssignFn Fn) {
   unsigned SavedStackOffset = StackOffset;
-  llvm::Align SavedMaxStackArgAlign = MaxStackArgAlign;
+  Align SavedMaxStackArgAlign = MaxStackArgAlign;
   unsigned NumLocs = Locs.size();
 
   // Set the 'inreg' flag if it is used for this calling convention.
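postOffset above expresses its padding math through offsetToAlignment, and budgets a full extra Alignment.value() bytes when the block is aligned more strictly than its function, since the block's final placement is then unknown. A worked instance of the same logic with made-up numbers:

```cpp
#include "llvm/Support/Alignment.h"

using namespace llvm;

// E.g. PO = 100, BlockAlign = Align(16), FuncAlign = Align(4):
// offsetToAlignment(100, Align(16)) == 12, so the guaranteed-placement case
// returns 112, while the unknown-placement case budgets 100 + 16 + 12 == 128.
static unsigned worstCasePostOffset(unsigned PO, Align BlockAlign,
                                    Align FuncAlign) {
  if (BlockAlign == Align::None())
    return PO; // no alignment requested, no padding
  if (BlockAlign <= FuncAlign)
    return PO + offsetToAlignment(PO, BlockAlign);
  return PO + BlockAlign.value() + offsetToAlignment(PO, BlockAlign);
}
```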
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -866,7 +866,7 @@
   bool DstAlignCanChange = false;
   MachineFrameInfo &MFI = MF.getFrameInfo();
   bool OptSize = shouldLowerMemFuncForSize(MF);
-  unsigned Align = MinAlign(DstAlign, SrcAlign);
+  unsigned Alignment = MinAlign(DstAlign, SrcAlign);
 
   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
@@ -885,7 +885,8 @@
   MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
 
   if (!findGISelOptimalMemOpLowering(
-          MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), SrcAlign,
+          MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
+          SrcAlign,
           /*IsMemset=*/false,
           /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
           /*AllowOverlap=*/!IsVolatile, DstPtrInfo.getAddrSpace(),
@@ -901,16 +902,16 @@
     // realignment.
     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
     if (!TRI->needsStackRealignment(MF))
-      while (NewAlign > Align &&
-             DL.exceedsNaturalStackAlignment(llvm::Align(NewAlign)))
-        NewAlign /= 2;
+      while (NewAlign > Alignment &&
+             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+        NewAlign /= 2;
 
-    if (NewAlign > Align) {
+    if (NewAlign > Alignment) {
       unsigned FI = FIDef->getOperand(1).getIndex();
       // Give the stack frame object a larger alignment if needed.
       if (MFI.getObjectAlignment(FI) < NewAlign)
         MFI.setObjectAlignment(FI, NewAlign);
-      Align = NewAlign;
+      Alignment = NewAlign;
     }
   }
 
@@ -973,7 +974,7 @@
   bool DstAlignCanChange = false;
   MachineFrameInfo &MFI = MF.getFrameInfo();
   bool OptSize = shouldLowerMemFuncForSize(MF);
-  unsigned Align = MinAlign(DstAlign, SrcAlign);
+  unsigned Alignment = MinAlign(DstAlign, SrcAlign);
 
   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
@@ -991,7 +992,8 @@
   // to a bug in its findOptimalMemOpLowering implementation. For now do the
   // same thing here.
   if (!findGISelOptimalMemOpLowering(
-          MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Align), SrcAlign,
+          MemOps, Limit, KnownLen, (DstAlignCanChange ? 0 : Alignment),
+          SrcAlign,
           /*IsMemset=*/false,
           /*ZeroMemset=*/false, /*MemcpyStrSrc=*/false,
           /*AllowOverlap=*/false, DstPtrInfo.getAddrSpace(),
@@ -1007,16 +1009,16 @@
     // realignment.
     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
     if (!TRI->needsStackRealignment(MF))
-      while (NewAlign > Align &&
-             DL.exceedsNaturalStackAlignment(llvm::Align(NewAlign)))
-        NewAlign /= 2;
+      while (NewAlign > Alignment &&
+             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+        NewAlign /= 2;
 
-    if (NewAlign > Align) {
+    if (NewAlign > Alignment) {
       unsigned FI = FIDef->getOperand(1).getIndex();
       // Give the stack frame object a larger alignment if needed.
       if (MFI.getObjectAlignment(FI) < NewAlign)
         MFI.setObjectAlignment(FI, NewAlign);
-      Align = NewAlign;
+      Alignment = NewAlign;
     }
   }
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -641,7 +641,7 @@
     return error(Loc, Twine("redefinition of machine basic block with id #") +
                           Twine(ID));
   if (Alignment)
-    MBB->setAlignment(llvm::Align(Alignment));
+    MBB->setAlignment(Align(Alignment));
   if (HasAddressTaken)
     MBB->setHasAddressTaken();
   MBB->setIsEHPad(IsLandingPad);
diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
--- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -393,7 +393,7 @@
   }
 
   if (YamlMF.Alignment)
-    MF.setAlignment(llvm::Align(YamlMF.Alignment));
+    MF.setAlignment(Align(YamlMF.Alignment));
   MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice);
   MF.setHasWinCFI(YamlMF.HasWinCFI);
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -629,7 +629,7 @@
     OS << "landing-pad";
     HasAttributes = true;
   }
-  if (MBB.getAlignment() != llvm::Align::None()) {
+  if (MBB.getAlignment() != Align::None()) {
     OS << (HasAttributes ? ", " : " (");
     OS << "align " << MBB.getAlignment().value();
     HasAttributes = true;
diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp
--- a/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -326,7 +326,7 @@
     OS << "landing-pad";
     HasAttributes = true;
   }
-  if (getAlignment() != llvm::Align::None()) {
+  if (getAlignment() != Align::None()) {
     OS << (HasAttributes ? ", " : " (");
     OS << "align " << Log2(getAlignment());
     HasAttributes = true;
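Note that the two printers above use different units: MIRPrinter emits the byte value while MachineBasicBlock::print emits the exponent via Log2. A quick illustration of the two accessors:

```cpp
#include "llvm/Support/Alignment.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void printBothForms(Align A) {      // e.g. A = Align(16)
  errs() << "align " << A.value() << '\n'; // MIRPrinter form:        16
  errs() << "align " << Log2(A) << '\n';   // MachineBasicBlock form:  4
}
```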
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -2807,7 +2807,7 @@
     if (!L)
       continue;
 
-    const llvm::Align Align = TLI->getPrefLoopAlignment(L);
+    const Align Align = TLI->getPrefLoopAlignment(L);
     if (Align == 1)
       continue; // Don't care about loop alignment.
 
@@ -3109,14 +3109,14 @@
     if (AlignAllBlock)
       // Align all of the blocks in the function to a specific alignment.
       for (MachineBasicBlock &MBB : MF)
-        MBB.setAlignment(llvm::Align(1ULL << AlignAllBlock));
+        MBB.setAlignment(Align(1ULL << AlignAllBlock));
     else if (AlignAllNonFallThruBlocks) {
       // Align all of the blocks that have no fall-through predecessors to a
       // specific alignment.
       for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) {
         auto LayoutPred = std::prev(MBI);
         if (!LayoutPred->isSuccessor(&*MBI))
-          MBI->setAlignment(llvm::Align(1ULL << AlignAllNonFallThruBlocks));
+          MBI->setAlignment(Align(1ULL << AlignAllNonFallThruBlocks));
       }
     }
     if (ViewBlockLayoutWithBFI != GVDT_None &&
diff --git a/llvm/lib/CodeGen/MachineFrameInfo.cpp b/llvm/lib/CodeGen/MachineFrameInfo.cpp
--- a/llvm/lib/CodeGen/MachineFrameInfo.cpp
+++ b/llvm/lib/CodeGen/MachineFrameInfo.cpp
@@ -28,26 +28,26 @@
 
 using namespace llvm;
 
-void MachineFrameInfo::ensureMaxAlignment(llvm::Align Align) {
+void MachineFrameInfo::ensureMaxAlignment(Align Alignment) {
   if (!StackRealignable)
-    assert(Align <= StackAlignment &&
-           "For targets without stack realignment, Align is out of limit!");
-  if (MaxAlignment < Align) MaxAlignment = Align;
+    assert(Alignment <= StackAlignment &&
+           "For targets without stack realignment, Alignment is out of limit!");
+  if (MaxAlignment < Alignment)
+    MaxAlignment = Alignment;
 }
 
 /// Clamp the alignment if requested and emit a warning.
-static inline llvm::Align clampStackAlignment(bool ShouldClamp,
-                                              llvm::Align Align,
-                                              llvm::Align StackAlign) {
-  if (!ShouldClamp || Align <= StackAlign)
-    return Align;
-  LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Align.value()
-                    << " exceeds the stack alignment " << StackAlign.value()
+static inline Align clampStackAlignment(bool ShouldClamp, Align Alignment,
+                                        Align StackAlignment) {
+  if (!ShouldClamp || Alignment <= StackAlignment)
+    return Alignment;
+  LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Alignment.value()
+                    << " exceeds the stack alignment " << StackAlignment.value()
                     << " when stack realignment is off" << '\n');
-  return StackAlign;
+  return StackAlignment;
 }
 
-int MachineFrameInfo::CreateStackObject(uint64_t Size, llvm::Align Alignment,
+int MachineFrameInfo::CreateStackObject(uint64_t Size, Align Alignment,
                                         bool IsSpillSlot,
                                         const AllocaInst *Alloca,
                                         uint8_t StackID) {
@@ -62,8 +62,7 @@
   return Index;
 }
 
-int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
-                                             llvm::Align Alignment) {
+int MachineFrameInfo::CreateSpillStackObject(uint64_t Size, Align Alignment) {
   Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
   CreateStackObject(Size, Alignment, true);
   int Index = (int)Objects.size() - NumFixedObjects - 1;
@@ -71,7 +70,7 @@
   return Index;
 }
 
-int MachineFrameInfo::CreateVariableSizedObject(llvm::Align Alignment,
+int MachineFrameInfo::CreateVariableSizedObject(Align Alignment,
                                                 const AllocaInst *Alloca) {
   HasVarSizedObjects = true;
   Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
@@ -89,8 +88,8 @@
   // object is 16-byte aligned. Note that unlike the non-fixed case, if the
   // stack needs realignment, we can't assume that the stack will in fact be
   // aligned.
-  llvm::Align Alignment = commonAlignment(
-      ForcedRealign ? llvm::Align::None() : StackAlignment, SPOffset);
+  Align Alignment =
+      commonAlignment(ForcedRealign ? Align::None() : StackAlignment, SPOffset);
   Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
   Objects.insert(Objects.begin(),
                  StackObject(Size, Alignment, SPOffset, IsImmutable,
@@ -102,8 +101,8 @@
 int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
                                                   int64_t SPOffset,
                                                   bool IsImmutable) {
-  llvm::Align Alignment = commonAlignment(
-      ForcedRealign ? llvm::Align::None() : StackAlignment, SPOffset);
+  Align Alignment =
+      commonAlignment(ForcedRealign ? Align::None() : StackAlignment, SPOffset);
   Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
   Objects.insert(Objects.begin(),
                  StackObject(Size, Alignment, SPOffset, IsImmutable,
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -182,7 +182,7 @@
         STI->getTargetLowering()->getPrefFunctionAlignment());
 
   if (AlignAllFunctions)
-    Alignment = llvm::Align(1ULL << AlignAllFunctions);
+    Alignment = Align(1ULL << AlignAllFunctions);
 
   JumpTableInfo = nullptr;
diff --git a/llvm/lib/CodeGen/PatchableFunction.cpp b/llvm/lib/CodeGen/PatchableFunction.cpp
--- a/llvm/lib/CodeGen/PatchableFunction.cpp
+++ b/llvm/lib/CodeGen/PatchableFunction.cpp
@@ -78,7 +78,7 @@
     MIB.add(MO);
 
   FirstActualI->eraseFromParent();
-  MF.ensureAlignment(llvm::Align(16));
+  MF.ensureAlignment(Align(16));
   return true;
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1898,7 +1898,7 @@
   EVT VT = Node->getValueType(0);
   SDValue Tmp1 = Node->getOperand(0);
   SDValue Tmp2 = Node->getOperand(1);
-  const llvm::MaybeAlign MA(Node->getConstantOperandVal(3));
+  const MaybeAlign MA(Node->getConstantOperandVal(3));
 
   SDValue VAListLoad = getLoad(TLI.getPointerTy(getDataLayout()), dl, Tmp1,
                                Tmp2, MachinePointerInfo(V));
@@ -5757,7 +5757,7 @@
 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                        SDValue Chain, SDValue Dst, SDValue Src,
-                                       uint64_t Size, unsigned Align,
+                                       uint64_t Size, unsigned Alignment,
                                        bool isVol, bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
@@ -5782,15 +5782,15 @@
   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
     DstAlignCanChange = true;
   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
-  if (Align > SrcAlign)
-    SrcAlign = Align;
+  if (Alignment > SrcAlign)
+    SrcAlign = Alignment;
   ConstantDataArraySlice Slice;
   bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
   bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
 
   if (!TLI.findOptimalMemOpLowering(
-          MemOps, Limit, Size, (DstAlignCanChange ? 0 : Align),
+          MemOps, Limit, Size, (DstAlignCanChange ? 0 : Alignment),
           (isZeroConstant ? 0 : SrcAlign), /*IsMemset=*/false,
           /*ZeroMemset=*/false, /*MemcpyStrSrc=*/CopyFromConstant,
           /*AllowOverlap=*/!isVol, DstPtrInfo.getAddrSpace(),
@@ -5805,15 +5805,15 @@
     // realignment.
     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
     if (!TRI->needsStackRealignment(MF))
-      while (NewAlign > Align &&
-             DL.exceedsNaturalStackAlignment(llvm::Align(NewAlign)))
-        NewAlign /= 2;
+      while (NewAlign > Alignment &&
+             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
+        NewAlign /= 2;
 
-    if (NewAlign > Align) {
+    if (NewAlign > Alignment) {
       // Give the stack frame object a larger alignment if needed.
       if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
-      Align = NewAlign;
+      Alignment = NewAlign;
     }
   }
 
@@ -5856,10 +5856,9 @@
       }
       Value = getMemsetStringVal(VT, dl, DAG, TLI, SubSlice);
       if (Value.getNode()) {
-        Store = DAG.getStore(Chain, dl, Value,
-                             DAG.getMemBasePlusOffset(Dst, DstOff, dl),
-                             DstPtrInfo.getWithOffset(DstOff), Align,
-                             MMOFlags);
+        Store = DAG.getStore(
+            Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
+            DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
         OutChains.push_back(Store);
       }
     }
@@ -5887,7 +5886,7 @@
       Store = DAG.getTruncStore(
           Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
-          DstPtrInfo.getWithOffset(DstOff), VT, Align, MMOFlags);
+          DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
       OutStoreChains.push_back(Store);
     }
     SrcOff += VTSize;
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -729,17 +729,17 @@
 
   // Assign the address of each symbol
   for (auto &Sym : SymbolsToAllocate) {
-    uint32_t Align = Sym.getAlignment();
+    uint32_t Alignment = Sym.getAlignment();
     uint64_t Size = Sym.getCommonSize();
     StringRef Name;
     if (auto NameOrErr = Sym.getName())
      Name = *NameOrErr;
    else
      return NameOrErr.takeError();
-    if (Align) {
+    if (Alignment) {
       // This symbol has an alignment requirement.
       uint64_t AlignOffset =
-          offsetToAlignment((uint64_t)Addr, llvm::Align(Align));
+          offsetToAlignment((uint64_t)Addr, Align(Alignment));
       Addr += AlignOffset;
       Offset += AlignOffset;
     }
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -51,7 +51,7 @@
   // Loop over each of the elements, placing them in memory.
   for (unsigned i = 0, e = NumElements; i != e; ++i) {
     Type *Ty = ST->getElementType(i);
-    const llvm::Align TyAlign(ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty));
+    const Align TyAlign(ST->isPacked() ? 1 : DL.getABITypeAlignment(Ty));
 
     // Add padding if necessary to align the data element properly.
     if (!isAligned(TyAlign, StructSize)) {
@@ -98,10 +98,8 @@
 // LayoutAlignElem, LayoutAlign support
 //===----------------------------------------------------------------------===//
 
-LayoutAlignElem LayoutAlignElem::get(AlignTypeEnum align_type,
-                                     llvm::Align abi_align,
-                                     llvm::Align pref_align,
-                                     uint32_t bit_width) {
+LayoutAlignElem LayoutAlignElem::get(AlignTypeEnum align_type, Align abi_align,
+                                     Align pref_align, uint32_t bit_width) {
   assert(abi_align <= pref_align && "Preferred alignment worse than ABI!");
   LayoutAlignElem retval;
   retval.AlignType = align_type;
@@ -123,10 +121,8 @@
 // PointerAlignElem, PointerAlign support
 //===----------------------------------------------------------------------===//
 
-PointerAlignElem PointerAlignElem::get(uint32_t AddressSpace,
-                                       llvm::Align ABIAlign,
-                                       llvm::Align PrefAlign,
-                                       uint32_t TypeByteWidth,
+PointerAlignElem PointerAlignElem::get(uint32_t AddressSpace, Align ABIAlign,
+                                       Align PrefAlign, uint32_t TypeByteWidth,
                                        uint32_t IndexWidth) {
   assert(ABIAlign <= PrefAlign && "Preferred alignment worse than ABI!");
   PointerAlignElem retval;
@@ -160,19 +156,18 @@
 }
 
 static const LayoutAlignElem DefaultAlignments[] = {
-    {INTEGER_ALIGN, 1, llvm::Align(1), llvm::Align(1)},    // i1
-    {INTEGER_ALIGN, 8, llvm::Align(1), llvm::Align(1)},    // i8
-    {INTEGER_ALIGN, 16, llvm::Align(2), llvm::Align(2)},   // i16
-    {INTEGER_ALIGN, 32, llvm::Align(4), llvm::Align(4)},   // i32
-    {INTEGER_ALIGN, 64, llvm::Align(4), llvm::Align(8)},   // i64
-    {FLOAT_ALIGN, 16, llvm::Align(2), llvm::Align(2)},     // half
-    {FLOAT_ALIGN, 32, llvm::Align(4), llvm::Align(4)},     // float
-    {FLOAT_ALIGN, 64, llvm::Align(8), llvm::Align(8)},     // double
-    {FLOAT_ALIGN, 128, llvm::Align(16), llvm::Align(16)},  // ppcf128, quad, ...
-    {VECTOR_ALIGN, 64, llvm::Align(8), llvm::Align(8)},    // v2i32, v1i64, ...
-    {VECTOR_ALIGN, 128, llvm::Align(16),
-     llvm::Align(16)}, // v16i8, v8i16, v4i32, ...
-    {AGGREGATE_ALIGN, 0, llvm::Align(1), llvm::Align(8)}   // struct
+    {INTEGER_ALIGN, 1, Align(1), Align(1)},     // i1
+    {INTEGER_ALIGN, 8, Align(1), Align(1)},     // i8
+    {INTEGER_ALIGN, 16, Align(2), Align(2)},    // i16
+    {INTEGER_ALIGN, 32, Align(4), Align(4)},    // i32
+    {INTEGER_ALIGN, 64, Align(4), Align(8)},    // i64
+    {FLOAT_ALIGN, 16, Align(2), Align(2)},      // half
+    {FLOAT_ALIGN, 32, Align(4), Align(4)},      // float
+    {FLOAT_ALIGN, 64, Align(8), Align(8)},      // double
+    {FLOAT_ALIGN, 128, Align(16), Align(16)},   // ppcf128, quad, ...
+    {VECTOR_ALIGN, 64, Align(8), Align(8)},     // v2i32, v1i64, ...
+    {VECTOR_ALIGN, 128, Align(16), Align(16)},  // v16i8, v8i16, v4i32, ...
+    {AGGREGATE_ALIGN, 0, Align(1), Align(8)}    // struct
 };
 
 void DataLayout::reset(StringRef Desc) {
@@ -193,7 +188,7 @@
     setAlignment((AlignTypeEnum)E.AlignType, E.ABIAlign, E.PrefAlign,
                  E.TypeBitWidth);
   }
-  setPointerAlignment(0, llvm::Align(8), llvm::Align(8), 8, 8);
+  setPointerAlignment(0, Align(8), Align(8), 8, 8);
 
   parseSpecifier(Desc);
 }
@@ -486,8 +481,8 @@
   });
 }
 
-void DataLayout::setAlignment(AlignTypeEnum align_type, llvm::Align abi_align,
-                              llvm::Align pref_align, uint32_t bit_width) {
+void DataLayout::setAlignment(AlignTypeEnum align_type, Align abi_align,
+                              Align pref_align, uint32_t bit_width) {
   // AlignmentsTy::ABIAlign and AlignmentsTy::PrefAlign were once stored as
   // uint16_t, it is unclear if there are requirements for alignment to be less
   // than 2^16 other than storage. In the meantime we leave the restriction as
@@ -520,9 +515,8 @@
   });
 }
 
-void DataLayout::setPointerAlignment(uint32_t AddrSpace, llvm::Align ABIAlign,
-                                     llvm::Align PrefAlign,
-                                     uint32_t TypeByteWidth,
+void DataLayout::setPointerAlignment(uint32_t AddrSpace, Align ABIAlign,
+                                     Align PrefAlign, uint32_t TypeByteWidth,
                                      uint32_t IndexWidth) {
   if (PrefAlign < ABIAlign)
     report_fatal_error(
@@ -542,9 +536,8 @@
 
 /// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or
 /// preferred if ABIInfo = false) the layout wants for the specified datatype.
-llvm::Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType,
-                                         uint32_t BitWidth, bool ABIInfo,
-                                         Type *Ty) const {
+Align DataLayout::getAlignmentInfo(AlignTypeEnum AlignType, uint32_t BitWidth,
+                                   bool ABIInfo, Type *Ty) const {
   AlignmentsTy::const_iterator I = findAlignmentLowerBound(AlignType, BitWidth);
   // See if we found an exact match. Or if we are looking for an integer type,
   // but don't have an exact match take the next largest integer. This is where
@@ -563,10 +556,11 @@
   } else if (AlignType == VECTOR_ALIGN) {
     // By default, use natural alignment for vector types. This is consistent
     // with what clang and llvm-gcc do.
-    unsigned Align = getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
-    Align *= cast<VectorType>(Ty)->getNumElements();
-    Align = PowerOf2Ceil(Align);
-    return llvm::Align(Align);
+    unsigned Alignment =
+        getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
+    Alignment *= cast<VectorType>(Ty)->getNumElements();
+    Alignment = PowerOf2Ceil(Alignment);
+    return Align(Alignment);
   }
 
   // If we still couldn't find a reasonable default alignment, fall back
@@ -575,9 +569,9 @@
   // approximation of reality, and if the user wanted something less
   // conservative, they should have specified it explicitly in the data
   // layout.
-  unsigned Align = getTypeStoreSize(Ty);
-  Align = PowerOf2Ceil(Align);
-  return llvm::Align(Align);
+  unsigned Alignment = getTypeStoreSize(Ty);
+  Alignment = PowerOf2Ceil(Alignment);
+  return Align(Alignment);
 }
 
 namespace {
@@ -638,7 +632,7 @@
   return L;
 }
 
-llvm::Align DataLayout::getPointerABIAlignment(unsigned AS) const {
+Align DataLayout::getPointerABIAlignment(unsigned AS) const {
   PointersTy::const_iterator I = findPointerLowerBound(AS);
   if (I == Pointers.end() || I->AddressSpace != AS) {
     I = findPointerLowerBound(0);
@@ -647,7 +641,7 @@
   return I->ABIAlign;
 }
 
-llvm::Align DataLayout::getPointerPrefAlignment(unsigned AS) const {
+Align DataLayout::getPointerPrefAlignment(unsigned AS) const {
   PointersTy::const_iterator I = findPointerLowerBound(AS);
   if (I == Pointers.end() || I->AddressSpace != AS) {
     I = findPointerLowerBound(0);
@@ -704,7 +698,7 @@
   Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref
   == false) for the requested type \a Ty.
  */
-llvm::Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
+Align DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const {
   AlignTypeEnum AlignType;
 
   assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
@@ -723,12 +717,11 @@
   case Type::StructTyID: {
     // Packed structure types always have an ABI alignment of one.
     if (cast<StructType>(Ty)->isPacked() && abi_or_pref)
-      return llvm::Align::None();
+      return Align::None();
 
     // Get the layout annotation... which is lazily created on demand.
     const StructLayout *Layout = getStructLayout(cast<StructType>(Ty));
-    const llvm::Align Align =
-        getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
+    const Align Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty);
     return std::max(Align, Layout->getAlignment());
   }
   case Type::IntegerTyID:
@@ -761,7 +754,7 @@
 
 /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for
 /// an integer type of the specified bitwidth.
-llvm::Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
+Align DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const {
   return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, nullptr);
 }
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1248,7 +1248,7 @@
   setAlignment(llvm::MaybeAlign(Align));
 }
 
-void AllocaInst::setAlignment(llvm::MaybeAlign Align) {
+void AllocaInst::setAlignment(MaybeAlign Align) {
   assert((!Align || *Align <= MaximumAlignment) &&
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
@@ -1343,7 +1343,7 @@
   setAlignment(llvm::MaybeAlign(Align));
 }
 
-void LoadInst::setAlignment(llvm::MaybeAlign Align) {
+void LoadInst::setAlignment(MaybeAlign Align) {
   assert((!Align || *Align <= MaximumAlignment) &&
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
@@ -1430,7 +1430,7 @@
   setAlignment(llvm::MaybeAlign(Align));
 }
 
-void StoreInst::setAlignment(llvm::MaybeAlign Align) {
+void StoreInst::setAlignment(MaybeAlign Align) {
   assert((!Align || *Align <= MaximumAlignment) &&
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -667,7 +667,7 @@
   assert(getType()->isPointerTy() && "must be pointer");
   if (auto *GO = dyn_cast<GlobalObject>(this)) {
     if (isa<Function>(GO)) {
-      const llvm::MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
+      const MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
       const unsigned Align = FunctionPtrAlign ? FunctionPtrAlign->value() : 0;
       switch (DL.getFunctionPtrAlignType()) {
       case DataLayout::FunctionPtrAlignType::Independent:
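The setAlignment overloads above standardize on MaybeAlign, an optional wrapper: constructing it from 0 yields the empty "unspecified" state, anything else wraps a power-of-two Align. The decoding idiom is the same one getByValAlign and Value::getPointerAlignment use:

```cpp
#include "llvm/Support/Alignment.h"

using namespace llvm;

// MaybeAlign(0) is empty; MaybeAlign(8) holds Align(8).
static unsigned alignmentValueOrZero(MaybeAlign MA) {
  return MA ? MA->value() : 0;
}
// alignmentValueOrZero(MaybeAlign(0)) == 0
// alignmentValueOrZero(MaybeAlign(8)) == 8
```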
Align(8) : Align(4)); SymbolTableIndex = addToSectionTable(SymtabSection); align(SymtabSection->getAlignment()); @@ -736,7 +736,7 @@ MCSectionELF *SymtabShndxSection = Ctx.getELFSection(".symtab_shndx", ELF::SHT_SYMTAB_SHNDX, 0, 4, ""); SymtabShndxSectionIndex = addToSectionTable(SymtabShndxSection); - SymtabShndxSection->setAlignment(llvm::Align(4)); + SymtabShndxSection->setAlignment(Align(4)); } ArrayRef FileNames = Asm.getFileNames(); @@ -824,7 +824,7 @@ MCSectionELF *RelaSection = Ctx.createELFRelSection( RelaSectionName, hasRelocationAddend() ? ELF::SHT_RELA : ELF::SHT_REL, Flags, EntrySize, Sec.getGroup(), &Sec); - RelaSection->setAlignment(is64Bit() ? llvm::Align(8) : llvm::Align(4)); + RelaSection->setAlignment(is64Bit() ? Align(8) : Align(4)); return RelaSection; } @@ -911,7 +911,7 @@ Section.setFlags(Section.getFlags() | ELF::SHF_COMPRESSED); // Alignment field should reflect the requirements of // the compressed section header. - Section.setAlignment(is64Bit() ? llvm::Align(8) : llvm::Align(4)); + Section.setAlignment(is64Bit() ? Align(8) : Align(4)); } else { // Add "z" prefix to section name. This is zlib-gnu style. MC.renameELFSection(&Section, (".z" + SectionName.drop_front(1)).str()); @@ -1135,7 +1135,7 @@ if (!GroupIdx) { MCSectionELF *Group = Ctx.createELFGroupSection(SignatureSymbol); GroupIdx = addToSectionTable(Group); - Group->setAlignment(llvm::Align(4)); + Group->setAlignment(Align(4)); Groups.push_back(Group); } std::vector &Members = diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp --- a/llvm/lib/MC/MCAssembler.cpp +++ b/llvm/lib/MC/MCAssembler.cpp @@ -322,7 +322,7 @@ case MCFragment::FT_Align: { const MCAlignFragment &AF = cast(F); unsigned Offset = Layout.getFragmentOffset(&AF); - unsigned Size = offsetToAlignment(Offset, llvm::Align(AF.getAlignment())); + unsigned Size = offsetToAlignment(Offset, Align(AF.getAlignment())); // Insert extra Nops for code alignment if the target define // shouldInsertExtraNopBytesForCodeAlign target hook. diff --git a/llvm/lib/MC/MCELFStreamer.cpp b/llvm/lib/MC/MCELFStreamer.cpp --- a/llvm/lib/MC/MCELFStreamer.cpp +++ b/llvm/lib/MC/MCELFStreamer.cpp @@ -139,7 +139,7 @@ MCSection *Section) { if (Section && Assembler.isBundlingEnabled() && Section->hasInstructions() && Section->getAlignment() < Assembler.getBundleAlignSize()) - Section->setAlignment(llvm::Align(Assembler.getBundleAlignSize())); + Section->setAlignment(Align(Assembler.getBundleAlignSize())); } void MCELFStreamer::ChangeSection(MCSection *Section, @@ -309,7 +309,7 @@ // Update the maximum alignment of the section if necessary. if (ByteAlignment > Section.getAlignment()) - Section.setAlignment(llvm::Align(ByteAlignment)); + Section.setAlignment(Align(ByteAlignment)); SwitchSection(P.first, P.second); } else { diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp --- a/llvm/lib/MC/MCObjectStreamer.cpp +++ b/llvm/lib/MC/MCObjectStreamer.cpp @@ -539,7 +539,7 @@ // Update the maximum alignment on the current section if necessary. 
MCSection *CurSec = getCurrentSectionOnly(); if (ByteAlignment > CurSec->getAlignment()) - CurSec->setAlignment(llvm::Align(ByteAlignment)); + CurSec->setAlignment(Align(ByteAlignment)); } void MCObjectStreamer::EmitCodeAlignment(unsigned ByteAlignment, diff --git a/llvm/lib/MC/MCWinCOFFStreamer.cpp b/llvm/lib/MC/MCWinCOFFStreamer.cpp --- a/llvm/lib/MC/MCWinCOFFStreamer.cpp +++ b/llvm/lib/MC/MCWinCOFFStreamer.cpp @@ -192,7 +192,7 @@ MCSection *SXData = getContext().getObjectFileInfo()->getSXDataSection(); getAssembler().registerSection(*SXData); if (SXData->getAlignment() < 4) - SXData->setAlignment(llvm::Align(4)); + SXData->setAlignment(Align(4)); new MCSymbolIdFragment(Symbol, SXData); @@ -209,7 +209,7 @@ MCSection *Sec = getCurrentSectionOnly(); getAssembler().registerSection(*Sec); if (Sec->getAlignment() < 4) - Sec->setAlignment(llvm::Align(4)); + Sec->setAlignment(Align(4)); new MCSymbolIdFragment(Symbol, getCurrentSectionOnly()); diff --git a/llvm/lib/MC/MachObjectWriter.cpp b/llvm/lib/MC/MachObjectWriter.cpp --- a/llvm/lib/MC/MachObjectWriter.cpp +++ b/llvm/lib/MC/MachObjectWriter.cpp @@ -127,7 +127,7 @@ const MCSection &NextSec = *Layout.getSectionOrder()[Next]; if (NextSec.isVirtualSection()) return 0; - return offsetToAlignment(EndAddr, llvm::Align(NextSec.getAlignment())); + return offsetToAlignment(EndAddr, Align(NextSec.getAlignment())); } void MachObjectWriter::writeHeader(MachO::HeaderFileType Type, @@ -445,8 +445,8 @@ } // Pad to a multiple of the pointer size. - W.OS.write_zeros(offsetToAlignment(BytesWritten, is64Bit() ? llvm::Align(8) - : llvm::Align(4))); + W.OS.write_zeros( + offsetToAlignment(BytesWritten, is64Bit() ? Align(8) : Align(4))); assert(W.OS.tell() - Start == Size); } @@ -835,7 +835,7 @@ // // FIXME: Is this machine dependent? unsigned SectionDataPadding = - offsetToAlignment(SectionDataFileSize, llvm::Align(4)); + offsetToAlignment(SectionDataFileSize, Align(4)); SectionDataFileSize += SectionDataPadding; // Write the prolog, starting with the header and load command... @@ -1000,8 +1000,8 @@ #endif Asm.getLOHContainer().emit(*this, Layout); // Pad to a multiple of the pointer size. - W.OS.write_zeros(offsetToAlignment(LOHRawSize, is64Bit() ? llvm::Align(8) - : llvm::Align(4))); + W.OS.write_zeros( + offsetToAlignment(LOHRawSize, is64Bit() ? Align(8) : Align(4))); assert(W.OS.tell() - Start == LOHSize); } diff --git a/llvm/lib/Object/ArchiveWriter.cpp b/llvm/lib/Object/ArchiveWriter.cpp --- a/llvm/lib/Object/ArchiveWriter.cpp +++ b/llvm/lib/Object/ArchiveWriter.cpp @@ -177,7 +177,7 @@ unsigned UID, unsigned GID, unsigned Perms, uint64_t Size) { uint64_t PosAfterHeader = Pos + 60 + Name.size(); // Pad so that even 64 bit object files are aligned. - unsigned Pad = offsetToAlignment(PosAfterHeader, llvm::Align(8)); + unsigned Pad = offsetToAlignment(PosAfterHeader, Align(8)); unsigned NameWithPadding = Name.size() + Pad; printWithSpacePadding(Out, Twine("#1/") + Twine(NameWithPadding), 16); printRestOfMemberHeader(Out, ModTime, UID, GID, Perms, @@ -244,7 +244,7 @@ static MemberData computeStringTable(StringRef Names) { unsigned Size = Names.size(); - unsigned Pad = offsetToAlignment(Size, llvm::Align(2)); + unsigned Pad = offsetToAlignment(Size, Align(2)); std::string Header; raw_string_ostream Out(Header); printWithSpacePadding(Out, "//", 48); @@ -308,7 +308,7 @@ // least 4-byte aligned for 32-bit content. Opt for the larger encoding // uniformly. // We do this for all bsd formats because it simplifies aligning members. 
- const llvm::Align Alignment(isBSDLike(Kind) ? 8 : 2); + const Align Alignment(isBSDLike(Kind) ? 8 : 2); unsigned Pad = offsetToAlignment(Size, Alignment); Size += Pad; @@ -465,9 +465,9 @@ // uniformly. This matches the behaviour with cctools and ensures that ld64 // is happy with archives that we generate. unsigned MemberPadding = - isDarwin(Kind) ? offsetToAlignment(Data.size(), llvm::Align(8)) : 0; + isDarwin(Kind) ? offsetToAlignment(Data.size(), Align(8)) : 0; unsigned TailPadding = - offsetToAlignment(Data.size() + MemberPadding, llvm::Align(2)); + offsetToAlignment(Data.size() + MemberPadding, Align(2)); StringRef Padding = StringRef(PaddingData, MemberPadding + TailPadding); sys::TimePoint ModTime; diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp --- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp +++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp @@ -711,7 +711,7 @@ if (JTBBs.empty()) continue; unsigned Size = AFI->getJumpTableEntrySize(JTI); - EmitAlignment(llvm::Align(Size)); + EmitAlignment(Align(Size)); OutStreamer->EmitLabel(GetJTISymbol(JTI)); for (auto *JTBB : JTBBs) diff --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp --- a/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp +++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.cpp @@ -40,10 +40,10 @@ MVT LocVT, ISD::ArgFlagsTy &ArgFlags, CCState &State, unsigned SlotAlign) { unsigned Size = LocVT.getSizeInBits() / 8; - const llvm::Align StackAlign = + const Align StackAlign = State.getMachineFunction().getDataLayout().getStackAlignment(); - const llvm::Align OrigAlign(ArgFlags.getOrigAlign()); - const llvm::Align Align = std::min(OrigAlign, StackAlign); + const Align OrigAlign(ArgFlags.getOrigAlign()); + const Align Align = std::min(OrigAlign, StackAlign); for (auto &It : PendingMembers) { It.convertToMem(State.AllocateStack( diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -641,11 +641,10 @@ EnableExtLdPromotion = true; // Set required alignment. - setMinFunctionAlignment(llvm::Align(4)); + setMinFunctionAlignment(Align(4)); // Set preferred alignments. - setPrefLoopAlignment(llvm::Align(1ULL << STI.getPrefLoopLogAlignment())); - setPrefFunctionAlignment( - llvm::Align(1ULL << STI.getPrefFunctionLogAlignment())); + setPrefLoopAlignment(Align(1ULL << STI.getPrefLoopLogAlignment())); + setPrefFunctionAlignment(Align(1ULL << STI.getPrefFunctionLogAlignment())); // Only change the limit for targets in a jump table if specified by // the sub target, but not at the command line. diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -229,7 +229,7 @@ // alignment. Streamer.EmitValueToAlignment(64, 0, 1, 0); if (ReadOnlySection.getAlignment() < 64) - ReadOnlySection.setAlignment(llvm::Align(64)); + ReadOnlySection.setAlignment(Align(64)); const MCSubtargetInfo &STI = MF->getSubtarget(); @@ -417,7 +417,7 @@ // The starting address of all shader programs must be 256 bytes aligned. // Regular functions just need the basic required instruction alignment. - MF.setAlignment(MFI->isEntryFunction() ? llvm::Align(256) : llvm::Align(4)); + MF.setAlignment(MFI->isEntryFunction() ? 
Align(256) : Align(4)); SetupMachineFunction(MF); diff --git a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp --- a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp @@ -104,7 +104,7 @@ // Functions needs to be cacheline (256B) aligned. - MF.ensureAlignment(llvm::Align(256)); + MF.ensureAlignment(Align(256)); SetupMachineFunction(MF); diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h --- a/llvm/lib/Target/AMDGPU/SIISelLowering.h +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h @@ -384,7 +384,7 @@ unsigned Depth = 0) const override; AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override; - llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override; + Align getPrefLoopAlignment(MachineLoop *ML) const override; void allocateHSAUserSGPRs(CCState &CCInfo, MachineFunction &MF, diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -10684,9 +10684,9 @@ Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex()); } -llvm::Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { - const llvm::Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML); - const llvm::Align CacheLineAlign = llvm::Align(64); +Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { + const Align PrefAlign = TargetLowering::getPrefLoopAlignment(ML); + const Align CacheLineAlign = Align(64); // Pre-GFX10 target did not benefit from loop alignment if (!ML || DisableLoopAlignment || diff --git a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h --- a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h +++ b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h @@ -35,7 +35,7 @@ : ReturnStackOffsetSet(false), VarArgsFrameIndex(0), ReturnStackOffset(-1U), MaxCallStackReq(0) { // Functions are 4-byte aligned. - MF.setAlignment(llvm::Align(4)); + MF.setAlignment(Align(4)); } ~ARCFunctionInfo() {} diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp --- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp @@ -168,7 +168,7 @@ // relatively easy to exceed the thumb branch range within a TU. if (! ThumbIndirectPads.empty()) { OutStreamer->EmitAssemblerFlag(MCAF_Code16); - EmitAlignment(llvm::Align(2)); + EmitAlignment(Align(2)); for (std::pair &TIP : ThumbIndirectPads) { OutStreamer->EmitLabel(TIP.second); EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tBX) @@ -526,7 +526,7 @@ if (!Stubs.empty()) { // Switch with ".non_lazy_symbol_pointer" directive. OutStreamer->SwitchSection(TLOFMacho.getNonLazySymbolPointerSection()); - EmitAlignment(llvm::Align(4)); + EmitAlignment(Align(4)); for (auto &Stub : Stubs) emitNonLazySymbolPointer(*OutStreamer, Stub.first, Stub.second); @@ -539,7 +539,7 @@ if (!Stubs.empty()) { // Switch with ".non_lazy_symbol_pointer" directive. OutStreamer->SwitchSection(TLOFMacho.getThreadLocalPointerSection()); - EmitAlignment(llvm::Align(4)); + EmitAlignment(Align(4)); for (auto &Stub : Stubs) emitNonLazySymbolPointer(*OutStreamer, Stub.first, Stub.second); @@ -940,7 +940,7 @@ // Make sure the Thumb jump table is 4-byte aligned. This will be a nop for // ARM mode tables. - EmitAlignment(llvm::Align(4)); + EmitAlignment(Align(4)); // Emit a label for the jump table. 
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI); @@ -986,7 +986,7 @@ // Make sure the Thumb jump table is 4-byte aligned. This will be a nop for // ARM mode tables. - EmitAlignment(llvm::Align(4)); + EmitAlignment(Align(4)); // Emit a label for the jump table. MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI); @@ -1015,7 +1015,7 @@ unsigned JTI = MO1.getIndex(); if (Subtarget->isThumb1Only()) - EmitAlignment(llvm::Align(4)); + EmitAlignment(Align(4)); MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI); OutStreamer->EmitLabel(JTISymbol); @@ -1058,7 +1058,7 @@ OutStreamer->EmitDataRegion(MCDR_DataRegionEnd); // Make sure the next instruction is 2-byte aligned. - EmitAlignment(llvm::Align(2)); + EmitAlignment(Align(2)); } void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) { diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.h b/llvm/lib/Target/ARM/ARMBasicBlockInfo.h --- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.h +++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.h @@ -27,11 +27,11 @@ /// unknown offset bits. This does not include alignment padding caused by /// known offset bits. /// -/// @param Align alignment +/// @param Alignment alignment /// @param KnownBits Number of known low offset bits. -inline unsigned UnknownPadding(llvm::Align Align, unsigned KnownBits) { - if (KnownBits < Log2(Align)) - return Align.value() - (1ull << KnownBits); +inline unsigned UnknownPadding(Align Alignment, unsigned KnownBits) { + if (KnownBits < Log2(Alignment)) + return Alignment.value() - (1ull << KnownBits); return 0; } @@ -67,7 +67,7 @@ /// PostAlign - When > 1, the block terminator contains a .align /// directive, so the end of the block is aligned to PostAlign bytes. - llvm::Align PostAlign; + Align PostAlign; BasicBlockInfo() = default; @@ -86,10 +86,10 @@ /// Compute the offset immediately following this block. If Align is /// specified, return the offset the successor block will get if it has /// this alignment. - unsigned postOffset(llvm::Align Align = llvm::Align::None()) const { + unsigned postOffset(Align Alignment = Align::None()) const { unsigned PO = Offset + Size; - const llvm::Align PA = std::max(PostAlign, Align); - if (PA == llvm::Align::None()) + const Align PA = std::max(PostAlign, Alignment); + if (PA == Align::None()) return PO; // Add alignment padding from the terminator. return PO + UnknownPadding(PA, internalKnownBits()); @@ -100,7 +100,7 @@ /// instruction alignment. An aligned terminator may increase the number /// of know bits. /// If LogAlign is given, also consider the alignment of the next block. - unsigned postKnownBits(llvm::Align Align = llvm::Align::None()) const { + unsigned postKnownBits(Align Align = Align::None()) const { return std::max(Log2(std::max(PostAlign, Align)), internalKnownBits()); } }; diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp --- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp @@ -47,7 +47,7 @@ BasicBlockInfo &BBI = BBInfo[MBB->getNumber()]; BBI.Size = 0; BBI.Unalign = 0; - BBI.PostAlign = llvm::Align::None(); + BBI.PostAlign = Align::None(); for (MachineInstr &I : *MBB) { BBI.Size += TII->getInstSizeInBytes(I); @@ -62,8 +62,8 @@ // tBR_JTr contains a .align 2 directive. 
if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) { - BBI.PostAlign = llvm::Align(4); - MBB->getParent()->ensureAlignment(llvm::Align(4)); + BBI.PostAlign = Align(4); + MBB->getParent()->ensureAlignment(Align(4)); } } @@ -126,7 +126,7 @@ for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) { // Get the offset and known bits at the end of the layout predecessor. // Include the alignment of the current block. - const llvm::Align Align = MF.getBlockNumbered(i)->getAlignment(); + const Align Align = MF.getBlockNumbered(i)->getAlignment(); const unsigned Offset = BBInfo[i - 1].postOffset(Align); const unsigned KnownBits = BBInfo[i - 1].postKnownBits(Align); diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp --- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -247,7 +247,7 @@ void doInitialJumpTablePlacement(std::vector &CPEMIs); bool BBHasFallthrough(MachineBasicBlock *MBB); CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI); - llvm::Align getCPEAlign(const MachineInstr *CPEMI); + Align getCPEAlign(const MachineInstr *CPEMI); void scanFunctionJumpTables(); void initializeFunctionInfo(const std::vector &CPEMIs); MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI); @@ -404,7 +404,7 @@ // Functions with jump tables need an alignment of 4 because they use the ADR // instruction, which aligns the PC to 4 bytes before adding an offset. if (!T2JumpTables.empty()) - MF->ensureAlignment(llvm::Align(4)); + MF->ensureAlignment(Align(4)); /// Remove dead constant pool entries. MadeChange |= removeUnusedCPEntries(); @@ -494,7 +494,7 @@ MF->push_back(BB); // MachineConstantPool measures alignment in bytes. - const llvm::Align MaxAlign(MCP->getConstantPoolAlignment()); + const Align MaxAlign(MCP->getConstantPoolAlignment()); const unsigned MaxLogAlign = Log2(MaxAlign); // Mark the basic block as required by the const-pool. @@ -650,25 +650,25 @@ /// getCPEAlign - Returns the required alignment of the constant pool entry /// represented by CPEMI. -llvm::Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) { +Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) { switch (CPEMI->getOpcode()) { case ARM::CONSTPOOL_ENTRY: break; case ARM::JUMPTABLE_TBB: - return isThumb1 ? llvm::Align(4) : llvm::Align(1); + return isThumb1 ? Align(4) : Align(1); case ARM::JUMPTABLE_TBH: - return isThumb1 ? llvm::Align(4) : llvm::Align(2); + return isThumb1 ? 
Align(4) : Align(2); case ARM::JUMPTABLE_INSTS: - return llvm::Align(2); + return Align(2); case ARM::JUMPTABLE_ADDRS: - return llvm::Align(4); + return Align(4); default: llvm_unreachable("unknown constpool entry kind"); } unsigned CPI = getCombinedIndex(CPEMI); assert(CPI < MCP->getConstants().size() && "Invalid constant pool index."); - return llvm::Align(MCP->getConstants()[CPI].getAlignment()); + return Align(MCP->getConstants()[CPI].getAlignment()); } /// scanFunctionJumpTables - Do a scan of the function, building up @@ -1021,10 +1021,10 @@ MachineBasicBlock* Water, CPUser &U, unsigned &Growth) { BBInfoVector &BBInfo = BBUtils->getBBInfo(); - const llvm::Align CPEAlign = getCPEAlign(U.CPEMI); + const Align CPEAlign = getCPEAlign(U.CPEMI); const unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPEAlign); unsigned NextBlockOffset; - llvm::Align NextBlockAlignment; + Align NextBlockAlignment; MachineFunction::const_iterator NextBlock = Water->getIterator(); if (++NextBlock == MF->end()) { NextBlockOffset = BBInfo[Water->getNumber()].postOffset(); @@ -1214,7 +1214,7 @@ // inserting islands between BB0 and BB1 makes other accesses out of range. MachineBasicBlock *UserBB = U.MI->getParent(); BBInfoVector &BBInfo = BBUtils->getBBInfo(); - const llvm::Align CPEAlign = getCPEAlign(U.CPEMI); + const Align CPEAlign = getCPEAlign(U.CPEMI); unsigned MinNoSplitDisp = BBInfo[UserBB->getNumber()].postOffset(CPEAlign); if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2) return false; @@ -1268,7 +1268,7 @@ CPUser &U = CPUsers[CPUserIndex]; MachineInstr *UserMI = U.MI; MachineInstr *CPEMI = U.CPEMI; - const llvm::Align CPEAlign = getCPEAlign(CPEMI); + const Align CPEAlign = getCPEAlign(CPEMI); MachineBasicBlock *UserMBB = UserMI->getParent(); BBInfoVector &BBInfo = BBUtils->getBBInfo(); const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()]; @@ -1323,7 +1323,7 @@ // Try to split the block so it's fully aligned. Compute the latest split // point where we can add a 4-byte branch instruction, and then align to // Align which is the largest possible alignment in the function. - const llvm::Align Align = MF->getAlignment(); + const Align Align = MF->getAlignment(); assert(Align >= CPEAlign && "Over-aligned constant pool entry"); unsigned KnownBits = UserBBI.internalKnownBits(); unsigned UPad = UnknownPadding(Align, KnownBits); @@ -1501,9 +1501,9 @@ // Always align the new block because CP entries can be smaller than 4 // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may // be an already aligned constant pool block. - const llvm::Align Align = isThumb ? llvm::Align(2) : llvm::Align(4); - if (NewMBB->getAlignment() < Align) - NewMBB->setAlignment(Align); + const Align Alignment = isThumb ? Align(2) : Align(4); + if (NewMBB->getAlignment() < Alignment) + NewMBB->setAlignment(Alignment); // Remove the original WaterList entry; we want subsequent insertions in // this vicinity to go after the one we're about to insert. This @@ -1566,7 +1566,7 @@ BBInfo[CPEBB->getNumber()].Size = 0; // This block no longer needs to be aligned. - CPEBB->setAlignment(llvm::Align::None()); + CPEBB->setAlignment(Align::None()); } else { // Entries are sorted by descending alignment, so realign from the front. 
CPEBB->setAlignment(getCPEAlign(&*CPEBB->begin())); diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -1428,16 +1428,14 @@ // On ARM arguments smaller than 4 bytes are extended, so all arguments // are at least 4 bytes aligned. - setMinStackArgumentAlignment(llvm::Align(4)); + setMinStackArgumentAlignment(Align(4)); // Prefer likely predicted branches to selects on out-of-order cores. PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); - setPrefLoopAlignment( - llvm::Align(1ULL << Subtarget->getPrefLoopLogAlignment())); + setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment())); - setMinFunctionAlignment(Subtarget->isThumb() ? llvm::Align(2) - : llvm::Align(4)); + setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4)); if (Subtarget->isThumb() || Subtarget->isThumb2()) setTargetDAGCombine(ISD::ABS); diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp --- a/llvm/lib/Target/AVR/AVRISelLowering.cpp +++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp @@ -236,7 +236,7 @@ setLibcallName(RTLIB::SIN_F32, "sin"); setLibcallName(RTLIB::COS_F32, "cos"); - setMinFunctionAlignment(llvm::Align(2)); + setMinFunctionAlignment(Align(2)); setMinimumJumpTableEntries(UINT_MAX); } diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp --- a/llvm/lib/Target/BPF/BPFISelLowering.cpp +++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp @@ -133,8 +133,8 @@ setBooleanContents(ZeroOrOneBooleanContent); // Function alignments - setMinFunctionAlignment(llvm::Align(8)); - setPrefFunctionAlignment(llvm::Align(8)); + setMinFunctionAlignment(Align(8)); + setPrefFunctionAlignment(Align(8)); if (BPFExpandMemcpyInOrder) { // LLVM generic code will try to expand memcpy into load/store pairs at this diff --git a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp --- a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp +++ b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp @@ -105,7 +105,7 @@ // offset of the current instruction from the start. unsigned InstOffset = 0; for (auto &B : MF) { - if (B.getAlignment() != llvm::Align::None()) { + if (B.getAlignment() != Align::None()) { // Although we don't know the exact layout of the final code, we need // to account for alignment padding somehow. This heuristic pads each // aligned basic block according to the alignment value. diff --git a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp --- a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp @@ -114,7 +114,7 @@ // First pass - compute the offset of each basic block. for (const MachineBasicBlock &MBB : MF) { - if (MBB.getAlignment() != llvm::Align::None()) { + if (MBB.getAlignment() != Align::None()) { // Although we don't know the exact layout of the final code, we need // to account for alignment padding somehow. This heuristic pads each // aligned basic block according to the alignment value. 
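Note on the hunks above: the branch-relaxation and constant-island code is the main consumer of the arithmetic helpers that come with the Align type. A minimal self-contained sketch of how they compose, assuming only llvm/Support/Alignment.h as used throughout this patch; unknownPadding mirrors the UnknownPadding() helper in ARMBasicBlockInfo.h above, while place() and its names are illustrative, not code from the tree:

  #include "llvm/Support/Alignment.h"
  #include <cassert>
  #include <cstdint>

  using llvm::Align;

  // Worst-case padding in front of a block when only the low KnownBits of
  // its final offset are known to be zero (as in ARMBasicBlockInfo.h).
  static uint64_t unknownPadding(Align Alignment, unsigned KnownBits) {
    if (KnownBits < llvm::Log2(Alignment))
      return Alignment.value() - (1ull << KnownBits);
    return 0;
  }

  // Exact padding once the offset is known: offsetToAlignment() is the
  // helper this patch threads through the MC and Object writers.
  static uint64_t place(uint64_t Offset, Align Alignment, uint64_t Size) {
    Offset += llvm::offsetToAlignment(Offset, Alignment);
    assert(llvm::isAligned(Alignment, Offset) && "padding reaches boundary");
    return Offset + Size;
  }

  int main() {
    assert(unknownPadding(Align(16), /*KnownBits=*/2) == 12);
    assert(place(/*Offset=*/10, Align(8), /*Size=*/4) == 20); // 6 pad bytes
    return 0;
  }

The Align(1)-means-unaligned convention (Align::None()) is what lets the Hexagon checks below simply compare against Align::None() instead of testing a log2 value for zero.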
diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp --- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -1380,7 +1380,7 @@ Align A = MFI.getLocalFrameMaxAlign(); assert(A <= 8 && "Unexpected local frame alignment"); if (A == 1) - MFI.setLocalFrameMaxAlign(llvm::Align(8)); + MFI.setLocalFrameMaxAlign(Align(8)); MFI.setUseLocalStackAllocationBlock(true); // Set the physical aligned-stack base address register. diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -1235,9 +1235,9 @@ Subtarget(ST) { auto &HRI = *Subtarget.getRegisterInfo(); - setPrefLoopAlignment(llvm::Align(16)); - setMinFunctionAlignment(llvm::Align(4)); - setPrefFunctionAlignment(llvm::Align(16)); + setPrefLoopAlignment(Align(16)); + setMinFunctionAlignment(Align(4)); + setPrefFunctionAlignment(Align(16)); setStackPointerRegisterToSaveRestore(HRI.getStackRegister()); setBooleanContents(TargetLoweringBase::UndefinedBooleanContent); setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent); diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCELFStreamer.cpp @@ -116,8 +116,8 @@ } // Update the maximum alignment of the section if necessary. - if (llvm::Align(ByteAlignment) > Section.getAlignment()) - Section.setAlignment(llvm::Align(ByteAlignment)); + if (Align(ByteAlignment) > Section.getAlignment()) + Section.setAlignment(Align(ByteAlignment)); SwitchSection(P.first, P.second); } else { diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp --- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp +++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp @@ -145,8 +145,8 @@ setTargetDAGCombine(ISD::XOR); // Function alignments - setMinFunctionAlignment(llvm::Align(4)); - setPrefFunctionAlignment(llvm::Align(4)); + setMinFunctionAlignment(Align(4)); + setPrefFunctionAlignment(Align(4)); setJumpIsExpensive(true); diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp --- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -327,8 +327,8 @@ setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN); // TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll - setMinFunctionAlignment(llvm::Align(2)); - setPrefFunctionAlignment(llvm::Align(2)); + setMinFunctionAlignment(Align(2)); + setPrefFunctionAlignment(Align(2)); } SDValue MSP430TargetLowering::LowerOperation(SDValue Op, diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp --- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -1805,9 +1805,8 @@ break; // We'll deal with this situation later on when applying fixups. if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm())) return Error(IDLoc, "branch target out of range"); - if (offsetToAlignment( - Offset.getImm(), - (inMicroMipsMode() ? llvm::Align(2) : llvm::Align(4)))) + if (offsetToAlignment(Offset.getImm(), + (inMicroMipsMode() ? 
Align(2) : Align(4)))) return Error(IDLoc, "branch to misaligned address"); break; case Mips::BGEZ: @@ -1836,9 +1835,8 @@ break; // We'll deal with this situation later on when applying fixups. if (!isIntN(inMicroMipsMode() ? 17 : 18, Offset.getImm())) return Error(IDLoc, "branch target out of range"); - if (offsetToAlignment( - Offset.getImm(), - (inMicroMipsMode() ? llvm::Align(2) : llvm::Align(4)))) + if (offsetToAlignment(Offset.getImm(), + (inMicroMipsMode() ? Align(2) : Align(4)))) return Error(IDLoc, "branch to misaligned address"); break; case Mips::BGEC: case Mips::BGEC_MMR6: @@ -1853,7 +1851,7 @@ break; // We'll deal with this situation later on when applying fixups. if (!isIntN(18, Offset.getImm())) return Error(IDLoc, "branch target out of range"); - if (offsetToAlignment(Offset.getImm(), llvm::Align(4))) + if (offsetToAlignment(Offset.getImm(), Align(4))) return Error(IDLoc, "branch to misaligned address"); break; case Mips::BLEZC: case Mips::BLEZC_MMR6: @@ -1866,7 +1864,7 @@ break; // We'll deal with this situation later on when applying fixups. if (!isIntN(18, Offset.getImm())) return Error(IDLoc, "branch target out of range"); - if (offsetToAlignment(Offset.getImm(), llvm::Align(4))) + if (offsetToAlignment(Offset.getImm(), Align(4))) return Error(IDLoc, "branch to misaligned address"); break; case Mips::BEQZC: case Mips::BEQZC_MMR6: @@ -1877,7 +1875,7 @@ break; // We'll deal with this situation later on when applying fixups. if (!isIntN(23, Offset.getImm())) return Error(IDLoc, "branch target out of range"); - if (offsetToAlignment(Offset.getImm(), llvm::Align(4))) + if (offsetToAlignment(Offset.getImm(), Align(4))) return Error(IDLoc, "branch to misaligned address"); break; case Mips::BEQZ16_MM: @@ -1890,7 +1888,7 @@ break; // We'll deal with this situation later on when applying fixups. if (!isInt<8>(Offset.getImm())) return Error(IDLoc, "branch target out of range"); - if (offsetToAlignment(Offset.getImm(), llvm::Align(2))) + if (offsetToAlignment(Offset.getImm(), Align(2))) return Error(IDLoc, "branch to misaligned address"); break; } @@ -3495,7 +3493,7 @@ } else { if (!isInt<17>(Offset.getImm())) return Error(IDLoc, "branch target out of range"); - if (offsetToAlignment(Offset.getImm(), llvm::Align(2))) + if (offsetToAlignment(Offset.getImm(), Align(2))) return Error(IDLoc, "branch to misaligned address"); Inst.clear(); Inst.setOpcode(Mips::BEQ_MM); diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h @@ -15,7 +15,7 @@ namespace llvm { // NaCl MIPS sandbox's instruction bundle size. 
-static const llvm::Align MIPS_NACL_BUNDLE_ALIGN = llvm::Align(16); +static const Align MIPS_NACL_BUNDLE_ALIGN = Align(16); bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx, bool *IsStore = nullptr); diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsOptionRecord.cpp @@ -37,7 +37,7 @@ Context.getELFSection(".MIPS.options", ELF::SHT_MIPS_OPTIONS, ELF::SHF_ALLOC | ELF::SHF_MIPS_NOSTRIP, 1, ""); MCA.registerSection(*Sec); - Sec->setAlignment(llvm::Align(8)); + Sec->setAlignment(Align(8)); Streamer->SwitchSection(Sec); Streamer->EmitIntValue(ELF::ODK_REGINFO, 1); // kind @@ -55,7 +55,7 @@ MCSectionELF *Sec = Context.getELFSection(".reginfo", ELF::SHT_MIPS_REGINFO, ELF::SHF_ALLOC, 24, ""); MCA.registerSection(*Sec); - Sec->setAlignment(MTS->getABI().IsN32() ? llvm::Align(8) : llvm::Align(4)); + Sec->setAlignment(MTS->getABI().IsN32() ? Align(8) : Align(4)); Streamer->SwitchSection(Sec); Streamer->EmitIntValue(ri_gprmask, 4); diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp @@ -901,12 +901,9 @@ MCSection &BSSSection = *OFI.getBSSSection(); MCA.registerSection(BSSSection); - TextSection.setAlignment( - llvm::Align(std::max(16u, TextSection.getAlignment()))); - DataSection.setAlignment( - llvm::Align(std::max(16u, DataSection.getAlignment()))); - BSSSection.setAlignment( - llvm::Align(std::max(16u, BSSSection.getAlignment()))); + TextSection.setAlignment(Align(std::max(16u, TextSection.getAlignment()))); + DataSection.setAlignment(Align(std::max(16u, DataSection.getAlignment()))); + BSSSection.setAlignment(Align(std::max(16u, BSSSection.getAlignment()))); if (RoundSectionSizes) { // Make sections sizes a multiple of the alignment. This is useful for @@ -1029,7 +1026,7 @@ MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Context); MCA.registerSection(*Sec); - Sec->setAlignment(llvm::Align(4)); + Sec->setAlignment(Align(4)); OS.PushSection(); @@ -1319,7 +1316,7 @@ MCSectionELF *Sec = Context.getELFSection( ".MIPS.abiflags", ELF::SHT_MIPS_ABIFLAGS, ELF::SHF_ALLOC, 24, ""); MCA.registerSection(*Sec); - Sec->setAlignment(llvm::Align(8)); + Sec->setAlignment(Align(8)); OS.SwitchSection(Sec); OS << ABIFlagsSection; diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp --- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -371,7 +371,7 @@ void doInitialPlacement(std::vector &CPEMIs); CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI); - llvm::Align getCPEAlign(const MachineInstr &CPEMI); + Align getCPEAlign(const MachineInstr &CPEMI); void initializeFunctionInfo(const std::vector &CPEMIs); unsigned getOffsetOf(MachineInstr *MI) const; unsigned getUserOffset(CPUser&) const; @@ -529,11 +529,11 @@ MF->push_back(BB); // MachineConstantPool measures alignment in bytes. We measure in log2(bytes). - const llvm::Align MaxAlign(MCP->getConstantPoolAlignment()); + const Align MaxAlign(MCP->getConstantPoolAlignment()); // Mark the basic block as required by the const-pool. // If AlignConstantIslands isn't set, use 4-byte alignment for everything. 
- BB->setAlignment(AlignConstantIslands ? MaxAlign : llvm::Align(4)); + BB->setAlignment(AlignConstantIslands ? MaxAlign : Align(4)); // The function needs to be as aligned as the basic blocks. The linker may // move functions around based on their alignment. @@ -619,16 +619,16 @@ /// getCPEAlign - Returns the required alignment of the constant pool entry /// represented by CPEMI. Alignment is measured in log2(bytes) units. -llvm::Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) { +Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) { assert(CPEMI.getOpcode() == Mips::CONSTPOOL_ENTRY); // Everything is 4-byte aligned unless AlignConstantIslands is set. if (!AlignConstantIslands) - return llvm::Align(4); + return Align(4); unsigned CPI = CPEMI.getOperand(1).getIndex(); assert(CPI < MCP->getConstants().size() && "Invalid constant pool index."); - return llvm::Align(MCP->getConstants()[CPI].getAlignment()); + return Align(MCP->getConstants()[CPI].getAlignment()); } /// initializeFunctionInfo - Do the initial scan of the function, building up @@ -936,11 +936,11 @@ unsigned &Growth) { unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(); unsigned NextBlockOffset; - llvm::Align NextBlockAlignment; + Align NextBlockAlignment; MachineFunction::const_iterator NextBlock = ++Water->getIterator(); if (NextBlock == MF->end()) { NextBlockOffset = BBInfo[Water->getNumber()].postOffset(); - NextBlockAlignment = llvm::Align::None(); + NextBlockAlignment = Align::None(); } else { NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset; NextBlockAlignment = NextBlock->getAlignment(); @@ -1251,7 +1251,7 @@ // Try to split the block so it's fully aligned. Compute the latest split // point where we can add a 4-byte branch instruction, and then align to // Align which is the largest possible alignment in the function. - const llvm::Align Align = MF->getAlignment(); + const Align Align = MF->getAlignment(); unsigned BaseInsertOffset = UserOffset + U.getMaxDisp(); LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x", BaseInsertOffset)); @@ -1423,7 +1423,7 @@ BBInfo[CPEBB->getNumber()].Size = 0; // This block no longer needs to be aligned. - CPEBB->setAlignment(llvm::Align(1)); + CPEBB->setAlignment(Align(1)); } else { // Entries are sorted by descending alignment, so realign from the front. CPEBB->setAlignment(getCPEAlign(*CPEBB->begin())); @@ -1522,7 +1522,7 @@ // We should have a way to back out this alignment restriction if we "can" later. // but it is not harmful. // - DestBB->setAlignment(llvm::Align(4)); + DestBB->setAlignment(Align(4)); Br.MaxDisp = ((1<<24)-1) * 2; MI->setDesc(TII->get(Mips::JalB16)); } diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -514,13 +514,12 @@ setLibcallName(RTLIB::SRA_I128, nullptr); } - setMinFunctionAlignment(Subtarget.isGP64bit() ? llvm::Align(8) - : llvm::Align(4)); + setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4)); // The arguments on the stack are defined in terms of 4-byte slots on O32 // and 8-byte slots on N32/N64. - setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? llvm::Align(8) - : llvm::Align(4)); + setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8) + : Align(4)); setStackPointerRegisterToSaveRestore(ABI.IsN64() ? 
Mips::SP_64 : Mips::SP); @@ -2148,7 +2147,7 @@ EVT VT = Node->getValueType(0); SDValue Chain = Node->getOperand(0); SDValue VAListPtr = Node->getOperand(1); - const llvm::Align Align = + const Align Align = llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne(); const Value *SV = cast(Node->getOperand(2))->getValue(); SDLoc DL(Node); diff --git a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp --- a/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelDAGToDAG.cpp @@ -247,8 +247,8 @@ Base = Addr.getOperand(0); // If base is a FI, additional offset calculation is done in // eliminateFrameIndex, otherwise we need to check the alignment - const llvm::Align Align(1ULL << ShiftAmount); - if (!isAligned(Align, CN->getZExtValue())) + const Align Alignment(1ULL << ShiftAmount); + if (!isAligned(Alignment, CN->getZExtValue())) return false; } diff --git a/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp b/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp --- a/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp +++ b/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp @@ -212,7 +212,7 @@ // element size), otherwise it is a 16-bit signed immediate. unsigned OffsetBitSize = getLoadStoreOffsetSizeInBits(MI.getOpcode(), MI.getOperand(OpNo - 1)); - const llvm::Align OffsetAlign(getLoadStoreOffsetAlign(MI.getOpcode())); + const Align OffsetAlign(getLoadStoreOffsetAlign(MI.getOpcode())); if (OffsetBitSize < 16 && isInt<16>(Offset) && (!isIntN(OffsetBitSize, Offset) || !isAligned(OffsetAlign, Offset))) { // If we have an offset that needs to fit into a signed n-bit immediate diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp --- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -1634,7 +1634,7 @@ if (!Stubs.empty()) { // Switch with ".non_lazy_symbol_pointer" directive. OutStreamer->SwitchSection(TLOFMacho.getNonLazySymbolPointerSection()); - EmitAlignment(isPPC64 ? llvm::Align(8) : llvm::Align(4)); + EmitAlignment(isPPC64 ? Align(8) : Align(4)); for (unsigned i = 0, e = Stubs.size(); i != e; ++i) { // L_foo$stub: diff --git a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp --- a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp +++ b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp @@ -81,20 +81,20 @@ /// original Offset. unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB, unsigned Offset) { - const llvm::Align Align = MBB.getAlignment(); - if (Align == 1) + const Align Alignment = MBB.getAlignment(); + if (Alignment == Align::None()) return 0; - const llvm::Align ParentAlign = MBB.getParent()->getAlignment(); + const Align ParentAlign = MBB.getParent()->getAlignment(); - if (Align <= ParentAlign) - return offsetToAlignment(Offset, Align); + if (Alignment <= ParentAlign) + return offsetToAlignment(Offset, Alignment); // The alignment of this MBB is larger than the function's alignment, so we // can't tell whether or not it will insert nops. Assume that it will. 
if (FirstImpreciseBlock < 0) FirstImpreciseBlock = MBB.getNumber(); - return Align.value() + offsetToAlignment(Offset, Align); + return Alignment.value() + offsetToAlignment(Offset, Alignment); } /// We need to be careful about the offset of the first block in the function @@ -178,7 +178,7 @@ const MachineBasicBlock *Dest, unsigned BrOffset) { int BranchSize; - llvm::Align MaxAlign = llvm::Align(4); + Align MaxAlign = Align(4); bool NeedExtraAdjustment = false; if (Dest->getNumber() <= Src->getNumber()) { // If this is a backwards branch, the delta is the offset from the diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -742,7 +742,7 @@ const SelectionDAG &DAG, unsigned Depth = 0) const override; - llvm::Align getPrefLoopAlignment(MachineLoop *ML) const override; + Align getPrefLoopAlignment(MachineLoop *ML) const override; bool shouldInsertFencesForAtomic(const Instruction *I) const override { return true; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -139,7 +139,7 @@ // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all // arguments are at least 4/8 bytes aligned. bool isPPC64 = Subtarget.isPPC64(); - setMinStackArgumentAlignment(isPPC64 ? llvm::Align(8) : llvm::Align(4)); + setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4)); // Set up the register classes. addRegisterClass(MVT::i32, &PPC::GPRCRegClass); @@ -1179,9 +1179,9 @@ setJumpIsExpensive(); } - setMinFunctionAlignment(llvm::Align(4)); + setMinFunctionAlignment(Align(4)); if (Subtarget.isDarwin()) - setPrefFunctionAlignment(llvm::Align(16)); + setPrefFunctionAlignment(Align(16)); switch (Subtarget.getDarwinDirective()) { default: break; @@ -1198,8 +1198,8 @@ case PPC::DIR_PWR7: case PPC::DIR_PWR8: case PPC::DIR_PWR9: - setPrefLoopAlignment(llvm::Align(16)); - setPrefFunctionAlignment(llvm::Align(16)); + setPrefLoopAlignment(Align(16)); + setPrefFunctionAlignment(Align(16)); break; } @@ -14110,7 +14110,7 @@ } } -llvm::Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { +Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { switch (Subtarget.getDarwinDirective()) { default: break; case PPC::DIR_970: @@ -14131,7 +14131,7 @@ // Actual alignment of the loop will depend on the hotness check and other // logic in alignBlocks. if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty()) - return llvm::Align(32); + return Align(32); } const PPCInstrInfo *TII = Subtarget.getInstrInfo(); @@ -14147,7 +14147,7 @@ } if (LoopSize > 16 && LoopSize <= 32) - return llvm::Align(32); + return Align(32); break; } diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -198,7 +198,7 @@ setBooleanContents(ZeroOrOneBooleanContent); // Function alignments. - const llvm::Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4); + const Align FunctionAlignment(Subtarget.hasStdExtC() ? 
2 : 4); setMinFunctionAlignment(FunctionAlignment); setPrefFunctionAlignment(FunctionAlignment); diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp --- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp +++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp @@ -1805,7 +1805,7 @@ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); - setMinFunctionAlignment(llvm::Align(4)); + setMinFunctionAlignment(Align(4)); computeRegisterProperties(Subtarget->getRegisterInfo()); } diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -120,9 +120,9 @@ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // Instructions are strings of 2-byte aligned 2-byte values. - setMinFunctionAlignment(llvm::Align(2)); + setMinFunctionAlignment(Align(2)); // For performance reasons we prefer 16-byte alignment. - setPrefFunctionAlignment(llvm::Align(16)); + setPrefFunctionAlignment(Align(16)); // Handle operations that are handled in a similar way for all types. for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE; diff --git a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp --- a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp +++ b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp @@ -87,7 +87,7 @@ // The minimum alignment of the block. // This value never changes. - llvm::Align Alignment; + Align Alignment; // The number of terminators in this block. This value never changes. unsigned NumTerminators = 0; diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp --- a/llvm/lib/Target/X86/X86AsmPrinter.cpp +++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp @@ -575,7 +575,7 @@ // Emitting note header. int WordSize = TT.isArch64Bit() ? 8 : 4; - EmitAlignment(WordSize == 4 ? llvm::Align(4) : llvm::Align(8)); + EmitAlignment(WordSize == 4 ? Align(4) : Align(8)); OutStreamer->EmitIntValue(4, 4 /*size*/); // data size for "GNU\0" OutStreamer->EmitIntValue(8 + WordSize, 4 /*size*/); // Elf_Prop size OutStreamer->EmitIntValue(ELF::NT_GNU_PROPERTY_TYPE_0, 4 /*size*/); @@ -585,7 +585,7 @@ OutStreamer->EmitIntValue(ELF::GNU_PROPERTY_X86_FEATURE_1_AND, 4); OutStreamer->EmitIntValue(4, 4); // data size OutStreamer->EmitIntValue(FeatureFlagsAnd, 4); // data - EmitAlignment(WordSize == 4 ? llvm::Align(4) : llvm::Align(8)); // padding + EmitAlignment(WordSize == 4 ? Align(4) : Align(8)); // padding OutStreamer->endSection(Nt); OutStreamer->SwitchSection(Cur); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -1951,13 +1951,13 @@ MaxLoadsPerMemcmpOptSize = 2; // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4). - setPrefLoopAlignment(llvm::Align(1ULL << ExperimentalPrefLoopAlignment)); + setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment)); // An out-of-order CPU can speculatively execute past a predictable branch, // but a conditional move could be stalled by an expensive earlier operation. 
PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder(); EnableExtLdPromotion = true; - setPrefFunctionAlignment(llvm::Align(16)); + setPrefFunctionAlignment(Align(16)); verifyIntrinsicTables(); } diff --git a/llvm/lib/Target/X86/X86RetpolineThunks.cpp b/llvm/lib/Target/X86/X86RetpolineThunks.cpp --- a/llvm/lib/Target/X86/X86RetpolineThunks.cpp +++ b/llvm/lib/Target/X86/X86RetpolineThunks.cpp @@ -279,7 +279,7 @@ CallTarget->addLiveIn(Reg); CallTarget->setHasAddressTaken(); - CallTarget->setAlignment(llvm::Align(16)); + CallTarget->setAlignment(Align(16)); insertRegReturnAddrClobber(*CallTarget, Reg); CallTarget->back().setPreInstrSymbol(MF, TargetSym); BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc)); diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h --- a/llvm/lib/Target/X86/X86TargetTransformInfo.h +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h @@ -187,8 +187,8 @@ bool canMacroFuseCmp(); bool isLegalMaskedLoad(Type *DataType); bool isLegalMaskedStore(Type *DataType); - bool isLegalNTLoad(Type *DataType, llvm::Align Alignment); - bool isLegalNTStore(Type *DataType, llvm::Align Alignment); + bool isLegalNTLoad(Type *DataType, Align Alignment); + bool isLegalNTStore(Type *DataType, Align Alignment); bool isLegalMaskedGather(Type *DataType); bool isLegalMaskedScatter(Type *DataType); bool isLegalMaskedExpandLoad(Type *DataType); diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -3294,7 +3294,7 @@ return isLegalMaskedLoad(DataType); } -bool X86TTIImpl::isLegalNTLoad(Type *DataType, llvm::Align Alignment) { +bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) { unsigned DataSize = DL.getTypeStoreSize(DataType); // The only supported nontemporal loads are for aligned vectors of 16 or 32 // bytes. 
Note that 32-byte nontemporal vector loads are supported by AVX2 @@ -3305,7 +3305,7 @@ return false; } -bool X86TTIImpl::isLegalNTStore(Type *DataType, llvm::Align Alignment) { +bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) { unsigned DataSize = DL.getTypeStoreSize(DataType); // SSE4A supports nontemporal stores of float and double at arbitrary diff --git a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp --- a/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp +++ b/llvm/lib/Target/XCore/XCoreAsmPrinter.cpp @@ -115,7 +115,7 @@ MCSymbol *GVSym = getSymbol(GV); const Constant *C = GV->getInitializer(); - const llvm::Align Align(DL.getPrefTypeAlignment(C->getType())); + const Align Alignment(DL.getPrefTypeAlignment(C->getType())); // Mark the start of the global getTargetStreamer().emitCCTopData(GVSym->getName()); @@ -143,7 +143,7 @@ llvm_unreachable("Unknown linkage type!"); } - EmitAlignment(std::max(Align, llvm::Align(4)), GV); + EmitAlignment(std::max(Alignment, Align(4)), GV); if (GV->isThreadLocal()) { report_fatal_error("TLS is not supported by this target!"); diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp --- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp +++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp @@ -171,8 +171,8 @@ setTargetDAGCombine(ISD::INTRINSIC_VOID); setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); - setMinFunctionAlignment(llvm::Align(2)); - setPrefFunctionAlignment(llvm::Align(4)); + setMinFunctionAlignment(Align(2)); + setPrefFunctionAlignment(Align(4)); } bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -1132,10 +1132,10 @@ /// often possible though. If alignment is important, a more reliable approach /// is to simply align all global variables and allocation instructions to /// their preferred alignment from the beginning. -static unsigned enforceKnownAlignment(Value *V, unsigned Align, +static unsigned enforceKnownAlignment(Value *V, unsigned Alignment, unsigned PrefAlign, const DataLayout &DL) { - assert(PrefAlign > Align); + assert(PrefAlign > Alignment); V = V->stripPointerCasts(); @@ -1146,36 +1146,36 @@ // stripPointerCasts recurses through infinite layers of bitcasts, // while computeKnownBits is not allowed to traverse more than 6 // levels. - Align = std::max(AI->getAlignment(), Align); - if (PrefAlign <= Align) - return Align; + Alignment = std::max(AI->getAlignment(), Alignment); + if (PrefAlign <= Alignment) + return Alignment; // If the preferred alignment is greater than the natural stack alignment // then don't round up. This avoids dynamic stack realignment. - if (DL.exceedsNaturalStackAlignment(llvm::Align(PrefAlign))) - return Align; + if (DL.exceedsNaturalStackAlignment(Align(PrefAlign))) + return Alignment; AI->setAlignment(PrefAlign); return PrefAlign; } if (auto *GO = dyn_cast(V)) { // TODO: as above, this shouldn't be necessary. - Align = std::max(GO->getAlignment(), Align); - if (PrefAlign <= Align) - return Align; + Alignment = std::max(GO->getAlignment(), Alignment); + if (PrefAlign <= Alignment) + return Alignment; // If there is a large requested alignment and we can, bump up the alignment // of the global. 
If the memory we set aside for the global may not be the // memory used by the final program then it is impossible for us to reliably // enforce the preferred alignment. if (!GO->canIncreaseAlignment()) - return Align; + return Alignment; GO->setAlignment(PrefAlign); return PrefAlign; } - return Align; + return Alignment; } unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp --- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp @@ -742,7 +742,7 @@ assert(VecTy && "did not find vectorized version of stored type"); unsigned Alignment = getLoadStoreAlignment(ST); assert(Alignment && "Alignment should be set"); - if (!TTI->isLegalNTStore(VecTy, llvm::Align(Alignment))) { + if (!TTI->isLegalNTStore(VecTy, Align(Alignment))) { reportVectorizationFailure( "nontemporal store instruction cannot be vectorized", "nontemporal store instruction cannot be vectorized", @@ -759,7 +759,7 @@ assert(VecTy && "did not find vectorized version of load type"); unsigned Alignment = getLoadStoreAlignment(LD); assert(Alignment && "Alignment should be set"); - if (!TTI->isLegalNTLoad(VecTy, llvm::Align(Alignment))) { + if (!TTI->isLegalNTLoad(VecTy, Align(Alignment))) { reportVectorizationFailure( "nontemporal load instruction cannot be vectorized", "nontemporal load instruction cannot be vectorized", diff --git a/llvm/tools/dsymutil/DwarfStreamer.cpp b/llvm/tools/dsymutil/DwarfStreamer.cpp --- a/llvm/tools/dsymutil/DwarfStreamer.cpp +++ b/llvm/tools/dsymutil/DwarfStreamer.cpp @@ -260,7 +260,7 @@ /// Emit the swift_ast section stored in \p Buffers. void DwarfStreamer::emitSwiftAST(StringRef Buffer) { MCSection *SwiftASTSection = MOFI->getDwarfSwiftASTSection(); - SwiftASTSection->setAlignment(llvm::Align(32)); + SwiftASTSection->setAlignment(Align(32)); MS->SwitchSection(SwiftASTSection); MS->EmitBytes(Buffer); } @@ -339,7 +339,7 @@ sizeof(int8_t); // Segment Size (in bytes) unsigned TupleSize = AddressSize * 2; - unsigned Padding = offsetToAlignment(HeaderSize, llvm::Align(TupleSize)); + unsigned Padding = offsetToAlignment(HeaderSize, Align(TupleSize)); Asm->EmitLabelDifference(EndLabel, BeginLabel, 4); // Arange length Asm->OutStreamer->EmitLabel(BeginLabel); diff --git a/llvm/tools/llvm-cov/TestingSupport.cpp b/llvm/tools/llvm-cov/TestingSupport.cpp --- a/llvm/tools/llvm-cov/TestingSupport.cpp +++ b/llvm/tools/llvm-cov/TestingSupport.cpp @@ -100,7 +100,7 @@ encodeULEB128(ProfileNamesAddress, OS); OS << ProfileNamesData; // Coverage mapping data is expected to have an alignment of 8. - for (unsigned Pad = offsetToAlignment(OS.tell(), llvm::Align(8)); Pad; --Pad) + for (unsigned Pad = offsetToAlignment(OS.tell(), Align(8)); Pad; --Pad) OS.write(uint8_t(0)); OS << CoverageMappingData; diff --git a/llvm/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp b/llvm/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp --- a/llvm/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp +++ b/llvm/tools/llvm-objcopy/MachO/MachOLayoutBuilder.cpp @@ -146,7 +146,7 @@ Sec.Offset = 0; } else { uint64_t PaddingSize = - offsetToAlignment(SegFileSize, llvm::Align(1ull << Sec.Align)); + offsetToAlignment(SegFileSize, Align(1ull << Sec.Align)); Sec.Offset = SegOffset + SegFileSize + PaddingSize; Sec.Size = Sec.Content.size(); SegFileSize += PaddingSize + Sec.Size;
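A closing note on the MaybeAlign side of this diff (the AllocaInst/LoadInst/StoreInst::setAlignment signatures, DL.getFunctionPtrAlign(), and the va_arg lowering in MipsISelLowering.cpp): these call sites keep the legacy "unsigned 0 means no alignment specified" encoding at the boundary, but stop it from leaking into Align itself. A minimal sketch of that mapping, assuming only llvm/Support/Alignment.h; fromLegacy is a hypothetical helper for illustration, not part of the patch:

  #include "llvm/Support/Alignment.h"
  #include <cassert>

  using llvm::Align;
  using llvm::MaybeAlign;

  // Old call sites pass unsigned(0) for "no alignment specified".
  // MaybeAlign(0) becomes an empty optional, and valueOrOne() recovers
  // the conservative Align(1) that legacy code treated 0 as meaning.
  static Align fromLegacy(unsigned Raw) {
    const MaybeAlign M(Raw); // asserts Raw is 0 or a power of two
    return M.valueOrOne();
  }

  int main() {
    assert(fromLegacy(0) == Align(1));   // unspecified collapses to 1
    assert(fromLegacy(16) == Align(16)); // powers of two pass through
    return 0;
  }

This is the same pattern as llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne() in the MipsISelLowering hunk above: the optional survives exactly as long as the distinction between "unspecified" and "1" still matters.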