diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -372,16 +372,10 @@
   /// \see getBeginClobberMask()
   const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;
 
-  /// Return alignment of the basic block. The alignment is specified as
-  /// log2(bytes).
-  /// FIXME: Remove the Log versions once migration to llvm::Align is over.
-  unsigned getLogAlignment() const { return Log2(Alignment); }
+  /// Return alignment of the basic block.
   llvm::Align getAlignment() const { return Alignment; }
 
-  /// Set alignment of the basic block. The alignment is specified as
-  /// log2(bytes).
-  /// FIXME: Remove the Log versions once migration to llvm::Align is over.
-  void setLogAlignment(unsigned A) { Alignment = llvm::Align(1ULL << A); }
+  /// Set alignment of the basic block.
   void setAlignment(llvm::Align A) { Alignment = A; }
 
   /// Returns true if the block is a landing pad. That is this basic block is
diff --git a/llvm/lib/CodeGen/BranchRelaxation.cpp b/llvm/lib/CodeGen/BranchRelaxation.cpp
--- a/llvm/lib/CodeGen/BranchRelaxation.cpp
+++ b/llvm/lib/CodeGen/BranchRelaxation.cpp
@@ -127,9 +127,8 @@
 #ifndef NDEBUG
   unsigned PrevNum = MF->begin()->getNumber();
   for (MachineBasicBlock &MBB : *MF) {
-    unsigned LogAlign = MBB.getLogAlignment();
-    unsigned Num = MBB.getNumber();
-    assert(BlockInfo[Num].Offset % (1u << LogAlign) == 0);
+    const unsigned Num = MBB.getNumber();
+    assert(isAligned(MBB.getAlignment(), BlockInfo[Num].Offset));
     assert(!Num || BlockInfo[PrevNum].postOffset(MBB) <= BlockInfo[Num].Offset);
     assert(BlockInfo[Num].Size == computeBlockSize(MBB));
     PrevNum = Num;
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -641,7 +641,7 @@
     return error(Loc, Twine("redefinition of machine basic block with id #") +
                           Twine(ID));
   if (Alignment)
-    MBB->setLogAlignment(Log2_32(Alignment));
+    MBB->setAlignment(llvm::Align(Alignment));
   if (HasAddressTaken)
     MBB->setHasAddressTaken();
   MBB->setIsEHPad(IsLandingPad);
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -629,10 +629,9 @@
     OS << "landing-pad";
     HasAttributes = true;
   }
-  if (MBB.getLogAlignment()) {
+  if (MBB.getAlignment() > 1) {
     OS << (HasAttributes ? ", " : " (");
-    OS << "align "
-       << (1UL << MBB.getLogAlignment());
+    OS << "align " << MBB.getAlignment().value();
     HasAttributes = true;
   }
   if (HasAttributes)
diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp
--- a/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -326,9 +326,9 @@
     OS << "landing-pad";
     HasAttributes = true;
   }
-  if (getLogAlignment()) {
+  if (getAlignment() > 1) {
     OS << (HasAttributes ? ", " : " (");
-    OS << "align " << getLogAlignment();
+    OS << "align " << Log2(getAlignment());
     HasAttributes = true;
   }
   if (HasAttributes)
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -2832,7 +2832,7 @@
     // Force alignment if all the predecessors are jumps. We already checked
     // that the block isn't cold above.
     if (!LayoutPred->isSuccessor(ChainBB)) {
-      ChainBB->setLogAlignment(Log2(Align));
+      ChainBB->setAlignment(Align);
       continue;
     }
@@ -2844,7 +2844,7 @@
         MBPI->getEdgeProbability(LayoutPred, ChainBB);
     BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
     if (LayoutEdgeFreq <= (Freq * ColdProb))
-      ChainBB->setLogAlignment(Log2(Align));
+      ChainBB->setAlignment(Align);
   }
 }
@@ -3109,14 +3109,14 @@
   if (AlignAllBlock)
     // Align all of the blocks in the function to a specific alignment.
    for (MachineBasicBlock &MBB : MF)
-      MBB.setLogAlignment(AlignAllBlock);
+      MBB.setAlignment(llvm::Align(1ULL << AlignAllBlock));
   else if (AlignAllNonFallThruBlocks) {
     // Align all of the blocks that have no fall-through predecessors to a
     // specific alignment.
     for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) {
       auto LayoutPred = std::prev(MBI);
       if (!LayoutPred->isSuccessor(&*MBI))
-        MBI->setLogAlignment(AlignAllNonFallThruBlocks);
+        MBI->setAlignment(llvm::Align(1ULL << AlignAllNonFallThruBlocks));
     }
   }
   if (ViewBlockLayoutWithBFI != GVDT_None &&
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10714,7 +10714,7 @@
-    // If inner loop block is aligned assume in average half of the alignment
-    // size to be added as nops.
+    // If the inner loop block is aligned, assume that on average half of the
+    // alignment amount is added as nops.
     if (MBB != Header)
-      LoopSize += (1 << MBB->getLogAlignment()) / 2;
+      LoopSize += MBB->getAlignment().value() / 2;
 
     for (const MachineInstr &MI : *MBB) {
       LoopSize += TII->getInstSizeInBytes(MI);
diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.h b/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
--- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
+++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.h
@@ -27,11 +27,11 @@
 /// unknown offset bits. This does not include alignment padding caused by
 /// known offset bits.
 ///
-/// @param LogAlign log2(alignment)
+/// @param Align alignment
 /// @param KnownBits Number of known low offset bits.
-inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
-  if (KnownBits < LogAlign)
-    return (1u << LogAlign) - (1u << KnownBits);
+inline unsigned UnknownPadding(llvm::Align Align, unsigned KnownBits) {
+  if (KnownBits < Log2(Align))
+    return Align.value() - (1u << KnownBits);
   return 0;
 }
@@ -65,10 +65,9 @@
   /// multiple of 1 << Unalign.
   uint8_t Unalign = 0;
 
-  /// PostAlign - When non-zero, the block terminator contains a .align
-  /// directive, so the end of the block is aligned to 1 << PostAlign
-  /// bytes.
-  uint8_t PostAlign = 0;
+  /// PostAlign - When > 1, the block terminator contains a .align
+  /// directive, so the end of the block is aligned to PostAlign bytes.
+  llvm::Align PostAlign;
 
   BasicBlockInfo() = default;
@@ -84,16 +83,16 @@
     return Bits;
   }
 
-  /// Compute the offset immediately following this block. If LogAlign is
+  /// Compute the offset immediately following this block. If Align is
   /// specified, return the offset the successor block will get if it has
   /// this alignment.
-  unsigned postOffset(unsigned LogAlign = 0) const {
+  unsigned postOffset(llvm::Align Align = llvm::Align::None()) const {
     unsigned PO = Offset + Size;
-    unsigned LA = std::max(unsigned(PostAlign), LogAlign);
-    if (!LA)
+    const llvm::Align PA = std::max(PostAlign, Align);
+    if (PA == llvm::Align::None())
       return PO;
     // Add alignment padding from the terminator.
- return PO + UnknownPadding(LA, internalKnownBits()); + return PO + UnknownPadding(PA, internalKnownBits()); } /// Compute the number of known low bits of postOffset. If this block @@ -101,8 +100,8 @@ /// instruction alignment. An aligned terminator may increase the number /// of know bits. /// If LogAlign is given, also consider the alignment of the next block. - unsigned postKnownBits(unsigned LogAlign = 0) const { - return std::max(std::max(unsigned(PostAlign), LogAlign), + unsigned postKnownBits(llvm::Align Align = llvm::Align::None()) const { + return std::max(Log2(std::max(PostAlign, Align)), internalKnownBits()); } }; diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp --- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp @@ -47,7 +47,7 @@ BasicBlockInfo &BBI = BBInfo[MBB->getNumber()]; BBI.Size = 0; BBI.Unalign = 0; - BBI.PostAlign = 0; + BBI.PostAlign = llvm::Align::None(); for (MachineInstr &I : *MBB) { BBI.Size += TII->getInstSizeInBytes(I); @@ -62,7 +62,7 @@ // tBR_JTr contains a .align 2 directive. if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) { - BBI.PostAlign = 2; + BBI.PostAlign = llvm::Align(4); MBB->getParent()->ensureAlignment(llvm::Align(4)); } } @@ -126,9 +126,9 @@ for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) { // Get the offset and known bits at the end of the layout predecessor. // Include the alignment of the current block. - unsigned LogAlign = MF.getBlockNumbered(i)->getLogAlignment(); - unsigned Offset = BBInfo[i - 1].postOffset(LogAlign); - unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign); + const llvm::Align Align = MF.getBlockNumbered(i)->getAlignment(); + const unsigned Offset = BBInfo[i - 1].postOffset(Align); + const unsigned KnownBits = BBInfo[i - 1].postKnownBits(Align); // This is where block i begins. Stop if the offset is already correct, // and we have updated 2 blocks. This is the maximum number of blocks diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp --- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -247,7 +247,7 @@ void doInitialJumpTablePlacement(std::vector &CPEMIs); bool BBHasFallthrough(MachineBasicBlock *MBB); CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI); - unsigned getCPELogAlign(const MachineInstr *CPEMI); + llvm::Align getCPEAlign(const MachineInstr *CPEMI); void scanFunctionJumpTables(); void initializeFunctionInfo(const std::vector &CPEMIs); MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI); @@ -337,7 +337,7 @@ dbgs() << format("%08x %bb.%u\t", BBI.Offset, J) << " kb=" << unsigned(BBI.KnownBits) << " ua=" << unsigned(BBI.Unalign) - << " pa=" << unsigned(BBI.PostAlign) + << " pa=" << Log2(BBI.PostAlign) << format(" size=%#x\n", BBInfo[J].Size); } }); @@ -494,11 +494,12 @@ MachineBasicBlock *BB = MF->CreateMachineBasicBlock(); MF->push_back(BB); - // MachineConstantPool measures alignment in bytes. We measure in log2(bytes). - unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment()); + // MachineConstantPool measures alignment in bytes. + const llvm::Align MaxAlign(MCP->getConstantPoolAlignment()); + const unsigned MaxLogAlign = Log2(MaxAlign); // Mark the basic block as required by the const-pool. - BB->setLogAlignment(MaxLogAlign); + BB->setAlignment(MaxAlign); // The function needs to be as aligned as the basic blocks. 
   // move functions around based on their alignment.
@@ -648,29 +649,27 @@
   return nullptr;
 }
 
-/// getCPELogAlign - Returns the required alignment of the constant pool entry
-/// represented by CPEMI. Alignment is measured in log2(bytes) units.
-unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
+/// getCPEAlign - Returns the required alignment of the constant pool entry
+/// represented by CPEMI.
+llvm::Align ARMConstantIslands::getCPEAlign(const MachineInstr *CPEMI) {
   switch (CPEMI->getOpcode()) {
   case ARM::CONSTPOOL_ENTRY:
     break;
   case ARM::JUMPTABLE_TBB:
-    return isThumb1 ? 2 : 0;
+    return isThumb1 ? llvm::Align(4) : llvm::Align(1);
   case ARM::JUMPTABLE_TBH:
-    return isThumb1 ? 2 : 1;
+    return isThumb1 ? llvm::Align(4) : llvm::Align(2);
   case ARM::JUMPTABLE_INSTS:
-    return 1;
+    return llvm::Align(2);
   case ARM::JUMPTABLE_ADDRS:
-    return 2;
+    return llvm::Align(4);
   default:
     llvm_unreachable("unknown constpool entry kind");
   }
 
   unsigned CPI = getCombinedIndex(CPEMI);
   assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
-  unsigned Align = MCP->getConstants()[CPI].getAlignment();
-  assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
-  return Log2_32(Align);
+  return llvm::Align(MCP->getConstants()[CPI].getAlignment());
 }
 
 /// scanFunctionJumpTables - Do a scan of the function, building up
@@ -1023,8 +1022,8 @@
                                         MachineBasicBlock* Water, CPUser &U,
                                         unsigned &Growth) {
   BBInfoVector &BBInfo = BBUtils->getBBInfo();
-  unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
-  unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
+  const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
+  const unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPEAlign);
   unsigned NextBlockOffset;
   llvm::Align NextBlockAlignment;
   MachineFunction::const_iterator NextBlock = Water->getIterator();
@@ -1051,7 +1050,7 @@
     // in blocks between CPE and the user.
     if (CPEOffset < UserOffset)
       UserOffset +=
-          Growth + UnknownPadding(Log2(MF->getAlignment()), CPELogAlign);
+          Growth + UnknownPadding(MF->getAlignment(), Log2(CPEAlign));
   } else
     // CPE fits in existing padding.
     Growth = 0;
@@ -1217,8 +1216,8 @@
   // inserting islands between BB0 and BB1 makes other accesses out of range.
   MachineBasicBlock *UserBB = U.MI->getParent();
   BBInfoVector &BBInfo = BBUtils->getBBInfo();
-  unsigned MinNoSplitDisp =
-      BBInfo[UserBB->getNumber()].postOffset(getCPELogAlign(U.CPEMI));
+  const llvm::Align CPEAlign = getCPEAlign(U.CPEMI);
+  unsigned MinNoSplitDisp = BBInfo[UserBB->getNumber()].postOffset(CPEAlign);
   if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
     return false;
   for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
@@ -1271,7 +1270,7 @@
   CPUser &U = CPUsers[CPUserIndex];
   MachineInstr *UserMI = U.MI;
   MachineInstr *CPEMI = U.CPEMI;
-  unsigned CPELogAlign = getCPELogAlign(CPEMI);
+  const llvm::Align CPEAlign = getCPEAlign(CPEMI);
   MachineBasicBlock *UserMBB = UserMI->getParent();
   BBInfoVector &BBInfo = BBUtils->getBBInfo();
   const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
@@ -1284,7 +1283,7 @@
     // Size of branch to insert.
     unsigned Delta = isThumb1 ? 2 : 4;
     // Compute the offset where the CPE will begin.
-    unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
+    unsigned CPEOffset = UserBBI.postOffset(CPEAlign) + Delta;
 
     if (isOffsetInRange(UserOffset, CPEOffset, U)) {
       LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
@@ -1325,11 +1324,11 @@
   // Try to split the block so it's fully aligned. Compute the latest split
   // point where we can add a 4-byte branch instruction, and then align to
-  // LogAlign which is the largest possible alignment in the function.
-  unsigned LogAlign = Log2(MF->getAlignment());
-  assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
+  // Align which is the largest possible alignment in the function.
+  const llvm::Align Align = MF->getAlignment();
+  assert(Align >= CPEAlign && "Over-aligned constant pool entry");
   unsigned KnownBits = UserBBI.internalKnownBits();
-  unsigned UPad = UnknownPadding(LogAlign, KnownBits);
+  unsigned UPad = UnknownPadding(Align, KnownBits);
   unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
   LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
                               BaseInsertOffset));
@@ -1340,7 +1339,7 @@
     BaseInsertOffset -= 4;
 
   LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
-                    << " la=" << LogAlign << " kb=" << KnownBits
+                    << " la=" << Log2(Align) << " kb=" << KnownBits
                     << " up=" << UPad << '\n');
 
   // This could point off the end of the block if we've already got constant
@@ -1393,8 +1392,8 @@
     CPUser &U = CPUsers[CPUIndex];
     if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
-      // Shift intertion point by one unit of alignment so it is within reach.
-      BaseInsertOffset -= 1u << LogAlign;
-      EndInsertOffset -= 1u << LogAlign;
+      // Shift insertion point by one unit of alignment so it is within reach.
+      BaseInsertOffset -= Align.value();
+      EndInsertOffset -= Align.value();
     }
     // This is overly conservative, as we don't account for CPEMIs being
     // reused within the block, but it doesn't matter much. Also assume CPEs
@@ -1504,9 +1503,9 @@
   // Always align the new block because CP entries can be smaller than 4
   // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
   // be an already aligned constant pool block.
-  const unsigned LogAlign = isThumb ? 1 : 2;
-  if (NewMBB->getLogAlignment() < LogAlign)
-    NewMBB->setLogAlignment(LogAlign);
+  const llvm::Align Align = isThumb ? llvm::Align(2) : llvm::Align(4);
+  if (NewMBB->getAlignment() < Align)
+    NewMBB->setAlignment(Align);
 
   // Remove the original WaterList entry; we want subsequent insertions in
   // this vicinity to go after the one we're about to insert. This
@@ -1535,7 +1534,7 @@
   decrementCPEReferenceCount(CPI, CPEMI);
 
   // Mark the basic block as aligned as required by the const-pool entry.
-  NewIsland->setLogAlignment(getCPELogAlign(U.CPEMI));
+  NewIsland->setAlignment(getCPEAlign(U.CPEMI));
 
   // Increase the size of the island block to account for the new entry.
   BBUtils->adjustBBSize(NewIsland, Size);
@@ -1569,10 +1568,11 @@
     BBInfo[CPEBB->getNumber()].Size = 0;
 
     // This block no longer needs to be aligned.
-    CPEBB->setLogAlignment(0);
-  } else
+    CPEBB->setAlignment(llvm::Align::None());
+  } else {
     // Entries are sorted by descending alignment, so realign from the front.
-    CPEBB->setLogAlignment(getCPELogAlign(&*CPEBB->begin()));
+    CPEBB->setAlignment(getCPEAlign(&*CPEBB->begin()));
+  }
 
   BBUtils->adjustBBOffsetsAfter(CPEBB);
 
   // An island has only one predecessor BB and one successor BB. Check if
diff --git a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
--- a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp
@@ -105,12 +105,11 @@
   // offset of the current instruction from the start.
   unsigned InstOffset = 0;
   for (auto &B : MF) {
-    if (B.getLogAlignment()) {
+    if (B.getAlignment() != llvm::Align::None()) {
       // Although we don't know the exact layout of the final code, we need
       // to account for alignment padding somehow. This heuristic pads each
       // aligned basic block according to the alignment value.
-      int ByteAlign = (1u << B.getLogAlignment()) - 1;
-      InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
+      InstOffset = alignTo(InstOffset, B.getAlignment());
     }
     OffsetMap[&B] = InstOffset;
     for (auto &MI : B.instrs()) {
diff --git a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
--- a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp
@@ -114,12 +114,11 @@
   // First pass - compute the offset of each basic block.
   for (const MachineBasicBlock &MBB : MF) {
-    if (MBB.getLogAlignment()) {
+    if (MBB.getAlignment() > 1) {
       // Although we don't know the exact layout of the final code, we need
       // to account for alignment padding somehow. This heuristic pads each
       // aligned basic block according to the alignment value.
-      int ByteAlign = (1u << MBB.getLogAlignment()) - 1;
-      InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign);
+      InstOffset = alignTo(InstOffset, MBB.getAlignment());
     }
     BlockToInstOffset[&MBB] = InstOffset;
diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
--- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
+++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp
@@ -222,11 +222,8 @@
     BasicBlockInfo() = default;
 
-    // FIXME: ignore LogAlign for this patch
-    //
-    unsigned postOffset(unsigned LogAlign = 0) const {
-      unsigned PO = Offset + Size;
-      return PO;
+    unsigned postOffset() const {
+      return Offset + Size;
     }
   };
@@ -376,7 +373,7 @@
     void doInitialPlacement(std::vector<MachineInstr *> &CPEMIs);
     CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
-    unsigned getCPELogAlign(const MachineInstr &CPEMI);
+    llvm::Align getCPEAlign(const MachineInstr &CPEMI);
     void initializeFunctionInfo(const std::vector<MachineInstr *> &CPEMIs);
     unsigned getOffsetOf(MachineInstr *MI) const;
     unsigned getUserOffset(CPUser&) const;
@@ -534,11 +531,11 @@
   MF->push_back(BB);
 
-  // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
-  unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment());
+  // MachineConstantPool measures alignment in bytes.
+  const llvm::Align MaxAlign(MCP->getConstantPoolAlignment());
 
   // Mark the basic block as required by the const-pool.
   // If AlignConstantIslands isn't set, use 4-byte alignment for everything.
-  BB->setLogAlignment(AlignConstantIslands ? MaxLogAlign : 2);
+  BB->setAlignment(AlignConstantIslands ? MaxAlign : llvm::Align(4));
 
   // The function needs to be as aligned as the basic blocks. The linker may
   // move functions around based on their alignment.
@@ -548,7 +545,7 @@
   // alignment of all entries as long as BB is sufficiently aligned. Keep
   // track of the insertion point for each alignment. We are going to bucket
   // sort the entries as they are created.
-  SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxLogAlign + 1,
+  SmallVector<MachineBasicBlock::iterator, 8> InsPoint(Log2(MaxAlign) + 1,
                                                        BB->end());
 
   // Add all of the constants from the constant pool to the end block, use an
@@ -577,7 +574,7 @@
       // Ensure that future entries with higher alignment get inserted before
       // CPEMI. This is bucket sort with iterators.
-      for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a)
+      for (unsigned a = LogAlign + 1; a <= Log2(MaxAlign); ++a)
         if (InsPoint[a] == InsAt)
           InsPoint[a] = CPEMI;
 
     // Add a new CPEntry, but no corresponding CPUser yet.
@@ -622,20 +619,18 @@
   return nullptr;
 }
 
-/// getCPELogAlign - Returns the required alignment of the constant pool entry
-/// represented by CPEMI. Alignment is measured in log2(bytes) units.
-unsigned MipsConstantIslands::getCPELogAlign(const MachineInstr &CPEMI) {
+/// getCPEAlign - Returns the required alignment of the constant pool entry
+/// represented by CPEMI.
+llvm::Align MipsConstantIslands::getCPEAlign(const MachineInstr &CPEMI) {
   assert(CPEMI.getOpcode() == Mips::CONSTPOOL_ENTRY);
 
   // Everything is 4-byte aligned unless AlignConstantIslands is set.
   if (!AlignConstantIslands)
-    return 2;
+    return llvm::Align(4);
 
   unsigned CPI = CPEMI.getOperand(1).getIndex();
   assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
-  unsigned Align = MCP->getConstants()[CPI].getAlignment();
-  assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
-  return Log2_32(Align);
+  return llvm::Align(MCP->getConstants()[CPI].getAlignment());
 }
 
 /// initializeFunctionInfo - Do the initial scan of the function, building up
@@ -941,8 +936,7 @@
 bool MipsConstantIslands::isWaterInRange(unsigned UserOffset,
                                          MachineBasicBlock* Water, CPUser &U,
                                          unsigned &Growth) {
-  unsigned CPELogAlign = getCPELogAlign(*U.CPEMI);
-  unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
+  unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset();
   unsigned NextBlockOffset;
   llvm::Align NextBlockAlignment;
   MachineFunction::const_iterator NextBlock = ++Water->getIterator();
@@ -1223,7 +1217,6 @@
   CPUser &U = CPUsers[CPUserIndex];
   MachineInstr *UserMI = U.MI;
   MachineInstr *CPEMI = U.CPEMI;
-  unsigned CPELogAlign = getCPELogAlign(*CPEMI);
   MachineBasicBlock *UserMBB = UserMI->getParent();
   const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
@@ -1233,7 +1226,7 @@
     // Size of branch to insert.
     unsigned Delta = 2;
     // Compute the offset where the CPE will begin.
-    unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
+    unsigned CPEOffset = UserBBI.postOffset() + Delta;
 
     if (isOffsetInRange(UserOffset, CPEOffset, U)) {
       LLVM_DEBUG(dbgs() << "Split at end of " << printMBBReference(*UserMBB)
@@ -1259,9 +1252,8 @@
   // Try to split the block so it's fully aligned. Compute the latest split
   // point where we can add a 4-byte branch instruction, and then align to
-  // LogAlign which is the largest possible alignment in the function.
-  unsigned LogAlign = Log2(MF->getAlignment());
-  assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
+  // Align which is the largest possible alignment in the function.
+  const llvm::Align Align = MF->getAlignment();
   unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
   LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x",
                               BaseInsertOffset));
@@ -1272,7 +1264,7 @@
     BaseInsertOffset -= 4;
 
   LLVM_DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
-                    << " la=" << LogAlign << '\n');
+                    << " la=" << Log2(Align) << '\n');
 
   // This could point off the end of the block if we've already got constant
   // pool entries following this block; only the last one is in the water list.
@@ -1297,8 +1289,8 @@
     CPUser &U = CPUsers[CPUIndex];
     if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
-      // Shift intertion point by one unit of alignment so it is within reach.
-      BaseInsertOffset -= 1u << LogAlign;
-      EndInsertOffset -= 1u << LogAlign;
+      // Shift insertion point by one unit of alignment so it is within reach.
+      BaseInsertOffset -= Align.value();
+      EndInsertOffset -= Align.value();
     }
     // This is overly conservative, as we don't account for CPEMIs being
    // reused within the block, but it doesn't matter much. Also assume CPEs
@@ -1401,7 +1393,7 @@
   ++NumCPEs;
 
   // Mark the basic block as aligned as required by the const-pool entry.
-  NewIsland->setLogAlignment(getCPELogAlign(*U.CPEMI));
+  NewIsland->setAlignment(getCPEAlign(*U.CPEMI));
 
   // Increase the size of the island block to account for the new entry.
   BBInfo[NewIsland->getNumber()].Size += Size;
@@ -1433,10 +1425,11 @@
     BBInfo[CPEBB->getNumber()].Size = 0;
 
     // This block no longer needs to be aligned.
-    CPEBB->setLogAlignment(0);
-  } else
+    CPEBB->setAlignment(llvm::Align(1));
+  } else {
     // Entries are sorted by descending alignment, so realign from the front.
-    CPEBB->setLogAlignment(getCPELogAlign(*CPEBB->begin()));
+    CPEBB->setAlignment(getCPEAlign(*CPEBB->begin()));
+  }
 
   adjustBBOffsetsAfter(CPEBB);
 
   // An island has only one predecessor BB and one successor BB. Check if
@@ -1531,7 +1524,7 @@
       // We should have a way to back out this alignment restriction if we "can" later.
       // but it is not harmful.
      //
-      DestBB->setLogAlignment(2);
+      DestBB->setAlignment(llvm::Align(4));
       Br.MaxDisp = ((1<<24)-1) * 2;
       MI->setDesc(TII->get(Mips::JalB16));
   }
diff --git a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
--- a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp
@@ -178,21 +178,20 @@
                                  const MachineBasicBlock *Dest,
                                  unsigned BrOffset) {
   int BranchSize;
-  unsigned MaxLogAlign = 2;
+  llvm::Align MaxAlign = llvm::Align(4);
   bool NeedExtraAdjustment = false;
   if (Dest->getNumber() <= Src->getNumber()) {
     // If this is a backwards branch, the delta is the offset from the
     // start of this block to this branch, plus the sizes of all blocks
     // from this block to the dest.
     BranchSize = BrOffset;
-    MaxLogAlign = std::max(MaxLogAlign, Src->getLogAlignment());
+    MaxAlign = std::max(MaxAlign, Src->getAlignment());
 
     int DestBlock = Dest->getNumber();
     BranchSize += BlockSizes[DestBlock].first;
     for (unsigned i = DestBlock+1, e = Src->getNumber(); i < e; ++i) {
       BranchSize += BlockSizes[i].first;
-      MaxLogAlign =
-          std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment());
+      MaxAlign = std::max(MaxAlign, Fn.getBlockNumbered(i)->getAlignment());
     }
 
     NeedExtraAdjustment = (FirstImpreciseBlock >= 0) &&
@@ -203,11 +202,10 @@
     unsigned StartBlock = Src->getNumber();
     BranchSize = BlockSizes[StartBlock].first - BrOffset;
 
-    MaxLogAlign = std::max(MaxLogAlign, Dest->getLogAlignment());
+    MaxAlign = std::max(MaxAlign, Dest->getAlignment());
     for (unsigned i = StartBlock+1, e = Dest->getNumber(); i != e; ++i) {
       BranchSize += BlockSizes[i].first;
-      MaxLogAlign =
-          std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment());
+      MaxAlign = std::max(MaxAlign, Fn.getBlockNumbered(i)->getAlignment());
     }
 
     NeedExtraAdjustment = (FirstImpreciseBlock >= 0) &&
@@ -257,7 +255,7 @@
-  // The computed offset is at most ((1 << alignment) - 4) bytes smaller
-  // than actual offset. So we add this number to the offset for safety.
+  // The computed offset is at most (alignment - 4) bytes smaller than the
+  // actual offset, so we add this number to the offset for safety.
   if (NeedExtraAdjustment)
-    BranchSize += (1 << MaxLogAlign) - 4;
+    BranchSize += MaxAlign.value() - 4;
 
   return BranchSize;
 }
diff --git a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
--- a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -85,9 +85,9 @@
   // This value never changes.
   uint64_t Size = 0;
 
-  // The minimum alignment of the block, as a log2 value.
+  // The minimum alignment of the block.
   // This value never changes.
-  unsigned LogAlignment = 0;
+  llvm::Align Alignment;
 
   // The number of terminators in this block. This value never changes.
   unsigned NumTerminators = 0;
@@ -179,17 +179,16 @@
 // instructions.
 void SystemZLongBranch::skipNonTerminators(BlockPosition &Position,
                                            MBBInfo &Block) {
-  if (Block.LogAlignment > Position.KnownBits) {
+  if (Log2(Block.Alignment) > Position.KnownBits) {
     // When calculating the address of Block, we need to conservatively
     // assume that Block had the worst possible misalignment.
-    Position.Address += ((uint64_t(1) << Block.LogAlignment) -
-                         (uint64_t(1) << Position.KnownBits));
-    Position.KnownBits = Block.LogAlignment;
+    Position.Address +=
+        (Block.Alignment.value() - (uint64_t(1) << Position.KnownBits));
+    Position.KnownBits = Log2(Block.Alignment);
   }
 
   // Align the addresses.
-  uint64_t AlignMask = (uint64_t(1) << Block.LogAlignment) - 1;
-  Position.Address = (Position.Address + AlignMask) & ~AlignMask;
+  Position.Address = alignTo(Position.Address, Block.Alignment);
 
   // Record the block's position.
   Block.Address = Position.Address;
@@ -282,7 +281,7 @@
     MBBInfo &Block = MBBs[I];
 
     // Record the alignment, for quick access.
-    Block.LogAlignment = MBB->getLogAlignment();
+    Block.Alignment = MBB->getAlignment();
 
     // Calculate the size of the fixed part of the block.
     MachineBasicBlock::iterator MI = MBB->begin();
diff --git a/llvm/lib/Target/X86/X86RetpolineThunks.cpp b/llvm/lib/Target/X86/X86RetpolineThunks.cpp
--- a/llvm/lib/Target/X86/X86RetpolineThunks.cpp
+++ b/llvm/lib/Target/X86/X86RetpolineThunks.cpp
@@ -279,7 +279,7 @@
   CallTarget->addLiveIn(Reg);
   CallTarget->setHasAddressTaken();
-  CallTarget->setLogAlignment(4);
+  CallTarget->setAlignment(llvm::Align(16));
   insertRegReturnAddrClobber(*CallTarget, Reg);
   CallTarget->back().setPreInstrSymbol(MF, TargetSym);
   BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
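Reviewer note, not part of the patch: the conversions above all lean on a small set of llvm::Align helpers (Log2, value(), alignTo, isAligned) that replace the old hand-rolled log2 arithmetic. The standalone sketch below illustrates the identities being relied on; unknownPadding mirrors the rewritten ARM UnknownPadding helper, and the snippet assumes the llvm/Support/Alignment.h API as of this change (when llvm::Align::None() still existed).

// Standalone sketch; not part of the patch. Assumes the Alignment.h API
// at the time of this change.
#include "llvm/Support/Alignment.h"
#include <cassert>
#include <cstdint>

using llvm::Align;

// Mirrors the rewritten ARM UnknownPadding helper: worst-case padding when
// only KnownBits low offset bits are known and we must reach Alignment.
static unsigned unknownPadding(Align Alignment, unsigned KnownBits) {
  if (KnownBits < Log2(Alignment))
    return Alignment.value() - (1u << KnownBits);
  return 0;
}

int main() {
  const Align A(16); // 16-byte alignment, i.e. Log2(A) == 4.

  // The two representations this patch converts between.
  assert(Log2(A) == 4 && A.value() == 16);
  assert(Align(1ULL << Log2(A)) == A); // cf. setAlignment(llvm::Align(1ULL << LogAlign))

  // alignTo/isAligned replace the manual mask arithmetic deleted in the
  // Hexagon and SystemZ hunks: (Offset + Mask) & ~Mask becomes alignTo.
  uint64_t Offset = 50;
  uint64_t Padded = alignTo(Offset, A);
  assert(Padded == 64 && isAligned(A, Padded));

  // Old and new forms of UnknownPadding agree:
  // (1u << LogAlign) - (1u << KnownBits) == Alignment.value() - (1u << KnownBits).
  assert(unknownPadding(A, /*KnownBits=*/2) == 12);
  return 0;
}

Built against LLVM headers of that era (e.g. clang++ `llvm-config --cxxflags` sketch.cpp), the asserts should all hold, which is a quick way to convince yourself the byte-based rewrites are behavior-preserving.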