diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -371,8 +371,6 @@ return N; } - unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); } - unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); } bool shouldBuildLookupTables() { diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h --- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h +++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h @@ -105,7 +105,7 @@ /// Alignment of the basic block. Zero if the basic block does not need to be /// aligned. The alignment is specified as log2(bytes). - unsigned Alignment = 0; + unsigned LogAlignment = 0; /// Indicate that this basic block is entered via an exception handler. bool IsEHPad = false; @@ -374,11 +374,11 @@ /// Return alignment of the basic block. The alignment is specified as /// log2(bytes). - unsigned getAlignment() const { return Alignment; } + unsigned getLogAlignment() const { return LogAlignment; } /// Set alignment of the basic block. The alignment is specified as /// log2(bytes). - void setAlignment(unsigned Align) { Alignment = Align; } + void setLogAlignment(unsigned A) { LogAlignment = A; } /// Returns true if the block is a landing pad. That is this basic block is /// entered via an exception handler. diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h --- a/llvm/include/llvm/CodeGen/MachineFunction.h +++ b/llvm/include/llvm/CodeGen/MachineFunction.h @@ -277,7 +277,7 @@ unsigned FunctionNumber; /// Alignment - The alignment of the function. - unsigned Alignment; + unsigned LogAlignment; /// ExposesReturnsTwice - True if the function calls setjmp or related /// functions with attribute "returns twice", but doesn't have @@ -508,15 +508,16 @@ const WinEHFuncInfo *getWinEHFuncInfo() const { return WinEHInfo; } WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; } - /// getAlignment - Return the alignment (log2, not bytes) of the function. - unsigned getAlignment() const { return Alignment; } + /// getLogAlignment - Return the alignment of the function. + unsigned getLogAlignment() const { return LogAlignment; } - /// setAlignment - Set the alignment (log2, not bytes) of the function. - void setAlignment(unsigned A) { Alignment = A; } + /// setLogAlignment - Set the alignment of the function. + void setLogAlignment(unsigned A) { LogAlignment = A; } /// ensureAlignment - Make sure the function is at least 1 << A bytes aligned. - void ensureAlignment(unsigned A) { - if (Alignment < A) Alignment = A; + void ensureLogAlignment(unsigned A) { + if (LogAlignment < A) + LogAlignment = A; } /// exposesReturnsTwice - Returns true if the function calls setjmp or diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -1580,30 +1580,24 @@ return JumpBufSize; } - /// Returns the target's jmp_buf alignment in bytes (if never set, the default - /// is 0) - unsigned getJumpBufAlignment() const { - return JumpBufAlignment; - } - /// Return the minimum stack alignment of an argument. unsigned getMinStackArgumentAlignment() const { return MinStackArgumentAlignment; } /// Return the minimum function alignment. 
- unsigned getMinFunctionAlignment() const { - return MinFunctionAlignment; + unsigned getMinFunctionLogAlignment() const { + return MinFunctionLogAlignment; } /// Return the preferred function alignment. - unsigned getPrefFunctionAlignment() const { - return PrefFunctionAlignment; + unsigned getPrefFunctionLogAlignment() const { + return PrefFunctionLogAlignment; } /// Return the preferred loop alignment. - virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const { - return PrefLoopAlignment; + virtual unsigned getPrefLoopLogAlignment(MachineLoop *ML = nullptr) const { + return PrefLoopLogAlignment; } /// Should loops be aligned even when the function is marked OptSize (but not @@ -2114,30 +2108,24 @@ JumpBufSize = Size; } - /// Set the target's required jmp_buf buffer alignment (in bytes); default is - /// 0 - void setJumpBufAlignment(unsigned Align) { - JumpBufAlignment = Align; - } - /// Set the target's minimum function alignment (in log2(bytes)) - void setMinFunctionAlignment(unsigned Align) { - MinFunctionAlignment = Align; + void setMinFunctionLogAlignment(unsigned LogAlign) { + MinFunctionLogAlignment = LogAlign; } /// Set the target's preferred function alignment. This should be set if /// there is a performance benefit to higher-than-minimum alignment (in /// log2(bytes)) - void setPrefFunctionAlignment(unsigned Align) { - PrefFunctionAlignment = Align; + void setPrefFunctionLogAlignment(unsigned LogAlign) { + PrefFunctionLogAlignment = LogAlign; } /// Set the target's preferred loop alignment. Default alignment is zero, it /// means the target does not care about loop alignment. The alignment is /// specified in log2(bytes). The target may also override /// getPrefLoopAlignment to provide per-loop values. - void setPrefLoopAlignment(unsigned Align) { - PrefLoopAlignment = Align; + void setPrefLoopAlignment(unsigned LogAlign) { + PrefLoopLogAlignment = LogAlign; } /// Set the minimum stack alignment of an argument (in log2(bytes)). @@ -2706,22 +2694,19 @@ /// The size, in bytes, of the target's jmp_buf buffers unsigned JumpBufSize; - /// The alignment, in bytes, of the target's jmp_buf buffers - unsigned JumpBufAlignment; - /// The minimum alignment that any argument on the stack needs to have. unsigned MinStackArgumentAlignment; /// The minimum function alignment (used when optimizing for size, and to /// prevent explicitly provided alignment from leading to incorrect code). - unsigned MinFunctionAlignment; + unsigned MinFunctionLogAlignment; /// The preferred function alignment (used when alignment unspecified and /// optimizing for speed). - unsigned PrefFunctionAlignment; + unsigned PrefFunctionLogAlignment; - /// The preferred loop alignment. - unsigned PrefLoopAlignment; + /// The preferred loop alignment (in log2, not in bytes). + unsigned PrefLoopLogAlignment; /// Size in bits of the maximum atomics size the backend supports. /// Accesses larger than this will be expanded by AtomicExpandPass. diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -666,7 +666,7 @@ EmitLinkage(&F, CurrentFnSym); if (MAI->hasFunctionAlignment()) - EmitAlignment(MF->getAlignment(), &F); + EmitAlignment(MF->getLogAlignment(), &F); if (MAI->hasDotTypeDotSizeDirective()) OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction); @@ -2905,8 +2905,8 @@ } // Emit an alignment directive for this block, if needed.
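The renamed accessors above, and the EmitAlignment calls that consume them, all traffic in log2(bytes). A couple of standalone spot checks of that encoding, plus the padding quantity the relaxation passes further down compute with OffsetToAlignment; the helper name paddingTo is illustrative only, not part of the patch:

static_assert((1u << 2) == 4, "log-alignment 2 is a 4-byte boundary");
static_assert((1u << 4) == 16, "log-alignment 4 is a 16-byte boundary");
static_assert((1u << 8) == 256, "log-alignment 8 is a 256-byte boundary");

// Bytes of padding needed to reach the next 2^LogAlign boundary from Offset.
unsigned paddingTo(unsigned Offset, unsigned LogAlign) {
  unsigned Bytes = 1u << LogAlign;
  return (Bytes - (Offset % Bytes)) % Bytes; // e.g. paddingTo(10, 3) == 6
}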
- if (unsigned Align = MBB.getAlignment()) - EmitAlignment(Align); + if (unsigned LogAlign = MBB.getLogAlignment()) + EmitAlignment(LogAlign); MCCodePaddingContext Context; setupCodePaddingContext(MBB, Context); OutStreamer->EmitCodePaddingBasicBlockStart(Context); diff --git a/llvm/lib/CodeGen/AsmPrinter/WinException.cpp b/llvm/lib/CodeGen/AsmPrinter/WinException.cpp --- a/llvm/lib/CodeGen/AsmPrinter/WinException.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/WinException.cpp @@ -203,8 +203,8 @@ // We want our funclet's entry point to be aligned such that no nops will be // present after the label. - Asm->EmitAlignment(std::max(Asm->MF->getAlignment(), MBB.getAlignment()), - &F); + Asm->EmitAlignment( + std::max(Asm->MF->getLogAlignment(), MBB.getLogAlignment()), &F); // Now that we've emitted the alignment directive, point at our funclet. Asm->OutStreamer->EmitLabel(Sym); diff --git a/llvm/lib/CodeGen/BranchRelaxation.cpp b/llvm/lib/CodeGen/BranchRelaxation.cpp --- a/llvm/lib/CodeGen/BranchRelaxation.cpp +++ b/llvm/lib/CodeGen/BranchRelaxation.cpp @@ -65,13 +65,13 @@ /// block. unsigned postOffset(const MachineBasicBlock &MBB) const { unsigned PO = Offset + Size; - unsigned Align = MBB.getAlignment(); - if (Align == 0) + unsigned LogAlign = MBB.getLogAlignment(); + if (LogAlign == 0) return PO; - unsigned AlignAmt = 1 << Align; - unsigned ParentAlign = MBB.getParent()->getAlignment(); - if (Align <= ParentAlign) + unsigned AlignAmt = 1 << LogAlign; + unsigned ParentLogAlign = MBB.getParent()->getLogAlignment(); + if (LogAlign <= ParentLogAlign) return PO + OffsetToAlignment(PO, AlignAmt); // The alignment of this MBB is larger than the function's alignment, so we @@ -128,9 +128,9 @@ #ifndef NDEBUG unsigned PrevNum = MF->begin()->getNumber(); for (MachineBasicBlock &MBB : *MF) { - unsigned Align = MBB.getAlignment(); + unsigned LogAlign = MBB.getLogAlignment(); unsigned Num = MBB.getNumber(); - assert(BlockInfo[Num].Offset % (1u << Align) == 0); + assert(BlockInfo[Num].Offset % (1u << LogAlign) == 0); assert(!Num || BlockInfo[PrevNum].postOffset(MBB) <= BlockInfo[Num].Offset); assert(BlockInfo[Num].Size == computeBlockSize(MBB)); PrevNum = Num; diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -640,7 +640,8 @@ return error(Loc, Twine("redefinition of machine basic block with id #") + Twine(ID)); if (Alignment) - MBB->setAlignment(Alignment); + MBB->setLogAlignment(Alignment); /// !!! from tests, it seems like alignment + /// is encoded as pow of 2. if (HasAddressTaken) MBB->setHasAddressTaken(); MBB->setIsEHPad(IsLandingPad); diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp --- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp @@ -393,7 +393,8 @@ } if (YamlMF.Alignment) - MF.setAlignment(YamlMF.Alignment); + MF.setLogAlignment(YamlMF.Alignment); /// !!! from tests, it seems like + /// alignment is encoded as pow of 2. MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice); MF.setHasWinCFI(YamlMF.HasWinCFI); diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp --- a/llvm/lib/CodeGen/MIRPrinter.cpp +++ b/llvm/lib/CodeGen/MIRPrinter.cpp @@ -197,7 +197,8 @@ yaml::MachineFunction YamlMF; YamlMF.Name = MF.getName(); - YamlMF.Alignment = MF.getAlignment(); + YamlMF.Alignment = MF.getLogAlignment(); /// !!! 
from tests, it seems like + /// alignment is encoded as pow of 2. YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice(); YamlMF.HasWinCFI = MF.hasWinCFI(); @@ -628,9 +629,11 @@ OS << "landing-pad"; HasAttributes = true; } - if (MBB.getAlignment()) { + if (MBB.getLogAlignment()) { OS << (HasAttributes ? ", " : " ("); - OS << "align " << MBB.getAlignment(); + OS << "align " + << MBB.getLogAlignment(); /// !!! from tests, it seems like alignment is + /// encoded as pow of 2. HasAttributes = true; } if (HasAttributes) diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp --- a/llvm/lib/CodeGen/MachineBasicBlock.cpp +++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp @@ -320,9 +320,9 @@ OS << "landing-pad"; HasAttributes = true; } - if (getAlignment()) { + if (getLogAlignment()) { OS << (HasAttributes ? ", " : " ("); - OS << "align " << getAlignment(); + OS << "align " << getLogAlignment(); HasAttributes = true; } if (HasAttributes) diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp --- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp +++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp @@ -2763,8 +2763,8 @@ if (!L) continue; - unsigned Align = TLI->getPrefLoopAlignment(L); - if (!Align) + unsigned LogAlign = TLI->getPrefLoopLogAlignment(L); + if (!LogAlign) continue; // Don't care about loop alignment. // If the block is cold relative to the function entry don't waste space @@ -2788,7 +2788,7 @@ // Force alignment if all the predecessors are jumps. We already checked // that the block isn't cold above. if (!LayoutPred->isSuccessor(ChainBB)) { - ChainBB->setAlignment(Align); + ChainBB->setLogAlignment(LogAlign); continue; } @@ -2800,7 +2800,7 @@ MBPI->getEdgeProbability(LayoutPred, ChainBB); BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb; if (LayoutEdgeFreq <= (Freq * ColdProb)) - ChainBB->setAlignment(Align); + ChainBB->setLogAlignment(LogAlign); } } @@ -3062,14 +3062,14 @@ if (AlignAllBlock) // Align all of the blocks in the function to a specific alignment. for (MachineBasicBlock &MBB : MF) - MBB.setAlignment(AlignAllBlock); + MBB.setLogAlignment(AlignAllBlock); // !!! else if (AlignAllNonFallThruBlocks) { // Align all of the blocks that have no fall-through predecessors to a // specific alignment. for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) { auto LayoutPred = std::prev(MBI); if (!LayoutPred->isSuccessor(&*MBI)) - MBI->setAlignment(AlignAllNonFallThruBlocks); + MBI->setLogAlignment(AlignAllNonFallThruBlocks); // !!! } } if (ViewBlockLayoutWithBFI != GVDT_None && diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp --- a/llvm/lib/CodeGen/MachineFunction.cpp +++ b/llvm/lib/CodeGen/MachineFunction.cpp @@ -172,16 +172,16 @@ FrameInfo->ensureMaxAlignment(F.getFnStackAlignment()); ConstantPool = new (Allocator) MachineConstantPool(getDataLayout()); - Alignment = STI->getTargetLowering()->getMinFunctionAlignment(); + LogAlignment = STI->getTargetLowering()->getMinFunctionLogAlignment(); // FIXME: Shouldn't use pref alignment if explicit alignment is set on F. // FIXME: Use Function::hasOptSize(). 
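The MachineFunction::init change above (completed just below this note) boils down to a small selection rule. A hedged sketch; the free function and its parameter names are invented for illustration, not part of the patch:

#include <algorithm>

// Start from the target's minimum log-alignment, raise it to the preferred
// value unless the function is optimized for size, then let the
// -align-all-functions override win if it was given.
unsigned initialFunctionLogAlign(unsigned MinLog, unsigned PrefLog,
                                 bool OptForSize, unsigned AlignAllOverride) {
  unsigned LogAlign = MinLog;
  if (!OptForSize)
    LogAlign = std::max(LogAlign, PrefLog);
  if (AlignAllOverride)
    LogAlign = AlignAllOverride;
  return LogAlign;
}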
if (!F.hasFnAttribute(Attribute::OptimizeForSize)) - Alignment = std::max(Alignment, - STI->getTargetLowering()->getPrefFunctionAlignment()); + LogAlignment = std::max( + LogAlignment, STI->getTargetLowering()->getPrefFunctionLogAlignment()); if (AlignAllFunctions) - Alignment = AlignAllFunctions; + LogAlignment = AlignAllFunctions; /// !!! JumpTableInfo = nullptr; diff --git a/llvm/lib/CodeGen/PatchableFunction.cpp b/llvm/lib/CodeGen/PatchableFunction.cpp --- a/llvm/lib/CodeGen/PatchableFunction.cpp +++ b/llvm/lib/CodeGen/PatchableFunction.cpp @@ -78,7 +78,7 @@ MIB.add(MO); FirstActualI->eraseFromParent(); - MF.ensureAlignment(4); + MF.ensureLogAlignment(4); return true; } diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -579,10 +579,9 @@ BooleanVectorContents = UndefinedBooleanContent; SchedPreferenceInfo = Sched::ILP; JumpBufSize = 0; - JumpBufAlignment = 0; - MinFunctionAlignment = 0; - PrefFunctionAlignment = 0; - PrefLoopAlignment = 0; + MinFunctionLogAlignment = 0; + PrefFunctionLogAlignment = 0; + PrefLoopLogAlignment = 0; GatherAllAliasesMaxDepth = 18; MinStackArgumentAlignment = 1; // TODO: the default will be switched to 0 in the next commit, along diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -640,9 +640,9 @@ EnableExtLdPromotion = true; // Set required alignment. - setMinFunctionAlignment(2); + setMinFunctionLogAlignment(2); // Set preferred alignments. - setPrefFunctionAlignment(STI.getPrefFunctionAlignment()); + setPrefFunctionLogAlignment(STI.getPrefFunctionLogAlignment()); setPrefLoopAlignment(STI.getPrefLoopAlignment()); // Only change the limit for entries in a jump table if specified by diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h --- a/llvm/lib/Target/AArch64/AArch64Subtarget.h +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h @@ -198,8 +198,8 @@ uint16_t PrefetchDistance = 0; uint16_t MinPrefetchStride = 1; unsigned MaxPrefetchIterationsAhead = UINT_MAX; - unsigned PrefFunctionAlignment = 0; - unsigned PrefLoopAlignment = 0; + unsigned PrefFunctionLogAlignment = 0; + unsigned PrefLoopLogAlignment = 0; unsigned MaxJumpTableSize = 0; unsigned WideningBaseCost = 0; @@ -359,8 +359,10 @@ unsigned getMaxPrefetchIterationsAhead() const { return MaxPrefetchIterationsAhead; } - unsigned getPrefFunctionAlignment() const { return PrefFunctionAlignment; } - unsigned getPrefLoopAlignment() const { return PrefLoopAlignment; } + unsigned getPrefFunctionLogAlignment() const { + return PrefFunctionLogAlignment; + } + unsigned getPrefLoopAlignment() const { return PrefLoopLogAlignment; } unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; } diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp --- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp @@ -71,13 +71,13 @@ case CortexA35: break; case CortexA53: - PrefFunctionAlignment = 3; + PrefFunctionLogAlignment = 3; break; case CortexA55: break; case CortexA57: MaxInterleaveFactor = 4; - PrefFunctionAlignment = 4; + PrefFunctionLogAlignment = 4; break; case CortexA65: break; @@ -85,7 +85,7 @@ case CortexA73: case CortexA75: case CortexA76: - 
PrefFunctionAlignment = 4; + PrefFunctionLogAlignment = 4; break; case Cyclone: CacheLineSize = 64; @@ -96,14 +96,14 @@ case ExynosM1: MaxInterleaveFactor = 4; MaxJumpTableSize = 8; - PrefFunctionAlignment = 4; - PrefLoopAlignment = 3; + PrefFunctionLogAlignment = 4; + PrefLoopLogAlignment = 3; break; case ExynosM3: MaxInterleaveFactor = 4; MaxJumpTableSize = 20; - PrefFunctionAlignment = 5; - PrefLoopAlignment = 4; + PrefFunctionLogAlignment = 5; + PrefLoopLogAlignment = 4; break; case Falkor: MaxInterleaveFactor = 4; @@ -127,7 +127,7 @@ case NeoverseE1: break; case NeoverseN1: - PrefFunctionAlignment = 4; + PrefFunctionLogAlignment = 4; break; case Saphira: MaxInterleaveFactor = 4; @@ -136,8 +136,8 @@ break; case ThunderX2T99: CacheLineSize = 64; - PrefFunctionAlignment = 3; - PrefLoopAlignment = 2; + PrefFunctionLogAlignment = 3; + PrefLoopLogAlignment = 2; MaxInterleaveFactor = 4; PrefetchDistance = 128; MinPrefetchStride = 1024; @@ -150,15 +150,15 @@ case ThunderXT81: case ThunderXT83: CacheLineSize = 128; - PrefFunctionAlignment = 3; - PrefLoopAlignment = 2; + PrefFunctionLogAlignment = 3; + PrefLoopLogAlignment = 2; // FIXME: remove this to enable 64-bit SLP if performance looks good. MinVectorRegisterBitWidth = 128; break; case TSV110: CacheLineSize = 64; - PrefFunctionAlignment = 4; - PrefLoopAlignment = 2; + PrefFunctionLogAlignment = 4; + PrefLoopLogAlignment = 2; break; } } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -417,7 +417,7 @@ // The starting address of all shader programs must be 256 bytes aligned. // Regular functions just need the basic required instruction alignment. - MF.setAlignment(MFI->isEntryFunction() ? 8 : 2); + MF.setLogAlignment(MFI->isEntryFunction() ? 8 : 2); SetupMachineFunction(MF); diff --git a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp --- a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp @@ -104,7 +104,7 @@ // Functions needs to be cacheline (256B) aligned. 
- MF.ensureAlignment(8); + MF.ensureLogAlignment(8); SetupMachineFunction(MF); diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h --- a/llvm/lib/Target/AMDGPU/SIISelLowering.h +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h @@ -379,8 +379,7 @@ unsigned Depth = 0) const override; AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override; - unsigned getPrefLoopAlignment(MachineLoop *ML) const override; - + unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override; void allocateHSAUserSGPRs(CCState &CCInfo, MachineFunction &MF, diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -10669,15 +10669,15 @@ Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex()); } -unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { - const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML); - const unsigned CacheLineAlign = 6; // log2(64) +unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const { + const unsigned PrefLogAlign = TargetLowering::getPrefLoopLogAlignment(ML); + const unsigned CacheLineLogAlign = 6; // log2(64) // Pre-GFX10 target did not benefit from loop alignment if (!ML || DisableLoopAlignment || (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) || getSubtarget()->hasInstFwdPrefetchBug()) - return PrefAlign; + return PrefLogAlign; // On GFX10 I$ is 4 x 64 bytes cache lines. // By default prefetcher keeps one cache line behind and reads two ahead. @@ -10691,28 +10691,28 @@ const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); const MachineBasicBlock *Header = ML->getHeader(); - if (Header->getAlignment() != PrefAlign) - return Header->getAlignment(); // Already processed. + if (Header->getLogAlignment() != PrefLogAlign) + return Header->getLogAlignment(); // Already processed. unsigned LoopSize = 0; for (const MachineBasicBlock *MBB : ML->blocks()) { // If inner loop block is aligned assume in average half of the alignment // size to be added as nops. if (MBB != Header) - LoopSize += (1 << MBB->getAlignment()) / 2; + LoopSize += (1 << MBB->getLogAlignment()) / 2; for (const MachineInstr &MI : *MBB) { LoopSize += TII->getInstSizeInBytes(MI); if (LoopSize > 192) - return PrefAlign; + return PrefLogAlign; } } if (LoopSize <= 64) - return PrefAlign; + return PrefLogAlign; if (LoopSize <= 128) - return CacheLineAlign; + return CacheLineLogAlign; // If any of parent loops is surrounded by prefetch instructions do not // insert new for inner loop, which would reset parent's settings. @@ -10720,7 +10720,7 @@ if (MachineBasicBlock *Exit = P->getExitBlock()) { auto I = Exit->getFirstNonDebugInstr(); if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH) - return CacheLineAlign; + return CacheLineLogAlign; } } @@ -10737,7 +10737,7 @@ .addImm(2); // prefetch 1 line behind PC } - return CacheLineAlign; + return CacheLineLogAlign; } LLVM_ATTRIBUTE_UNUSED diff --git a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h --- a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h +++ b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h @@ -35,7 +35,7 @@ : ReturnStackOffsetSet(false), VarArgsFrameIndex(0), ReturnStackOffset(-1U), MaxCallStackReq(0) { // Functions are 4-byte (2**2) aligned. 
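Stripped of the prefetch-instruction bookkeeping, the GFX10 logic in the SIISelLowering hunk above reduces to a size-based choice. A simplified sketch; the function name and free-function form are illustrative, not the target API:

unsigned pickLoopLogAlign(unsigned PrefLogAlign, unsigned LoopSizeInBytes) {
  const unsigned CacheLineLogAlign = 6;        // log2 of the 64-byte I$ line
  if (LoopSizeInBytes <= 64 || LoopSizeInBytes > 192)
    return PrefLogAlign;                       // already fits, or too big to help
  return CacheLineLogAlign;                    // 64 < size <= 192: cache-line align
}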
- MF.setAlignment(2); + MF.setLogAlignment(2); } ~ARCFunctionInfo() {} diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp --- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp @@ -63,7 +63,7 @@ // tBR_JTr contains a .align 2 directive. if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) { BBI.PostAlign = 2; - MBB->getParent()->ensureAlignment(2); + MBB->getParent()->ensureLogAlignment(2); } } @@ -126,7 +126,7 @@ for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) { // Get the offset and known bits at the end of the layout predecessor. // Include the alignment of the current block. - unsigned LogAlign = MF.getBlockNumbered(i)->getAlignment(); + unsigned LogAlign = MF.getBlockNumbered(i)->getLogAlignment(); unsigned Offset = BBInfo[i - 1].postOffset(LogAlign); unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign); diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp --- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -398,7 +398,7 @@ // Functions with jump tables need an alignment of 4 because they use the ADR // instruction, which aligns the PC to 4 bytes before adding an offset. if (!T2JumpTables.empty()) - MF->ensureAlignment(2); + MF->ensureLogAlignment(2); /// Remove dead constant pool entries. MadeChange |= removeUnusedCPEntries(); @@ -488,20 +488,21 @@ MF->push_back(BB); // MachineConstantPool measures alignment in bytes. We measure in log2(bytes). - unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment()); + unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment()); // Mark the basic block as required by the const-pool. - BB->setAlignment(MaxAlign); + BB->setLogAlignment(MaxLogAlign); // The function needs to be as aligned as the basic blocks. The linker may // move functions around based on their alignment. - MF->ensureAlignment(BB->getAlignment()); + MF->ensureLogAlignment(BB->getLogAlignment()); // Order the entries in BB by descending alignment. That ensures correct // alignment of all entries as long as BB is sufficiently aligned. Keep // track of the insertion point for each alignment. We are going to bucket // sort the entries as they are created. - SmallVector InsPoint(MaxAlign + 1, BB->end()); + SmallVector InsPoint(MaxLogAlign + 1, + BB->end()); // Add all of the constants from the constant pool to the end block, use an // identity mapping of CPI's to CPE's. @@ -526,7 +527,7 @@ // Ensure that future entries with higher alignment get inserted before // CPEMI. This is bucket sort with iterators. - for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a) + for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a) if (InsPoint[a] == InsAt) InsPoint[a] = CPEMI; @@ -687,7 +688,7 @@ BBInfoVector &BBInfo = BBUtils->getBBInfo(); // The known bits of the entry block offset are determined by the function // alignment. - BBInfo.front().KnownBits = MF->getAlignment(); + BBInfo.front().KnownBits = MF->getLogAlignment(); // Compute block offsets and known bits. 
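The bucket sort in the constant-island hunk above relies on the invariant stated in its comment: once the island block is aligned to the largest entry alignment and entries are emitted in descending alignment order, every entry offset stays a multiple of its own alignment (entry sizes being multiples of their alignment). A tiny self-contained check with made-up entry sizes:

#include <cassert>

int main() {
  unsigned Offset = 0;                           // from the max-aligned block start
  const unsigned SizeAndAlign[] = {8, 8, 4, 4, 2}; // descending alignment order
  for (unsigned S : SizeAndAlign) {
    assert(Offset % S == 0 && "entry would be misaligned");
    Offset += S;
  }
  return 0;
}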
BBUtils->adjustBBOffsetsAfter(&MF->front()); @@ -1004,14 +1005,14 @@ BBInfoVector &BBInfo = BBUtils->getBBInfo(); unsigned CPELogAlign = getCPELogAlign(U.CPEMI); unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign); - unsigned NextBlockOffset, NextBlockAlignment; + unsigned NextBlockOffset, NextBlockLogAlignment; MachineFunction::const_iterator NextBlock = Water->getIterator(); if (++NextBlock == MF->end()) { NextBlockOffset = BBInfo[Water->getNumber()].postOffset(); - NextBlockAlignment = 0; + NextBlockLogAlignment = 0; } else { NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset; - NextBlockAlignment = NextBlock->getAlignment(); + NextBlockLogAlignment = NextBlock->getLogAlignment(); } unsigned Size = U.CPEMI->getOperand(2).getImm(); unsigned CPEEnd = CPEOffset + Size; @@ -1023,13 +1024,13 @@ Growth = CPEEnd - NextBlockOffset; // Compute the padding that would go at the end of the CPE to align the next // block. - Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment); + Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment); // If the CPE is to be inserted before the instruction, that will raise // the offset of the instruction. Also account for unknown alignment padding // in blocks between CPE and the user. if (CPEOffset < UserOffset) - UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign); + UserOffset += Growth + UnknownPadding(MF->getLogAlignment(), CPELogAlign); } else // CPE fits in existing padding. Growth = 0; @@ -1304,7 +1305,7 @@ // Try to split the block so it's fully aligned. Compute the latest split // point where we can add a 4-byte branch instruction, and then align to // LogAlign which is the largest possible alignment in the function. - unsigned LogAlign = MF->getAlignment(); + unsigned LogAlign = MF->getLogAlignment(); assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry"); unsigned KnownBits = UserBBI.internalKnownBits(); unsigned UPad = UnknownPadding(LogAlign, KnownBits); @@ -1482,9 +1483,9 @@ // Always align the new block because CP entries can be smaller than 4 // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may // be an already aligned constant pool block. - const unsigned Align = isThumb ? 1 : 2; - if (NewMBB->getAlignment() < Align) - NewMBB->setAlignment(Align); + const unsigned LogAlign = isThumb ? 1 : 2; + if (NewMBB->getLogAlignment() < LogAlign) + NewMBB->setLogAlignment(LogAlign); // Remove the original WaterList entry; we want subsequent insertions in // this vicinity to go after the one we're about to insert. This @@ -1513,7 +1514,7 @@ decrementCPEReferenceCount(CPI, CPEMI); // Mark the basic block as aligned as required by the const-pool entry. - NewIsland->setAlignment(getCPELogAlign(U.CPEMI)); + NewIsland->setLogAlignment(getCPELogAlign(U.CPEMI)); // Increase the size of the island block to account for the new entry. BBUtils->adjustBBSize(NewIsland, Size); @@ -1547,10 +1548,10 @@ BBInfo[CPEBB->getNumber()].Size = 0; // This block no longer needs to be aligned. - CPEBB->setAlignment(0); + CPEBB->setLogAlignment(0); } else // Entries are sorted by descending alignment, so realign from the front. - CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin())); + CPEBB->setLogAlignment(getCPELogAlign(&*CPEBB->begin())); BBUtils->adjustBBOffsetsAfter(CPEBB); // An island has only one predecessor BB and one successor BB. 
Check if diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -1392,7 +1392,7 @@ setPrefLoopAlignment(Subtarget->getPrefLoopAlignment()); - setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2); + setMinFunctionLogAlignment(Subtarget->isThumb() ? 1 : 2); if (Subtarget->isThumb() || Subtarget->isThumb2()) setTargetDAGCombine(ISD::ABS); diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp --- a/llvm/lib/Target/AVR/AVRISelLowering.cpp +++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp @@ -236,7 +236,7 @@ setLibcallName(RTLIB::SIN_F32, "sin"); setLibcallName(RTLIB::COS_F32, "cos"); - setMinFunctionAlignment(1); + setMinFunctionLogAlignment(1); setMinimumJumpTableEntries(UINT_MAX); } diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp --- a/llvm/lib/Target/BPF/BPFISelLowering.cpp +++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp @@ -133,8 +133,8 @@ setBooleanContents(ZeroOrOneBooleanContent); // Function alignments (log2) - setMinFunctionAlignment(3); - setPrefFunctionAlignment(3); + setMinFunctionLogAlignment(3); + setPrefFunctionLogAlignment(3); if (BPFExpandMemcpyInOrder) { // LLVM generic code will try to expand memcpy into load/store pairs at this diff --git a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp --- a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp +++ b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp @@ -105,11 +105,11 @@ // offset of the current instruction from the start. unsigned InstOffset = 0; for (auto &B : MF) { - if (B.getAlignment()) { + if (B.getLogAlignment()) { // Although we don't know the exact layout of the final code, we need // to account for alignment padding somehow. This heuristic pads each // aligned basic block according to the alignment value. - int ByteAlign = (1u << B.getAlignment()) - 1; + int ByteAlign = (1u << B.getLogAlignment()) - 1; InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign); } OffsetMap[&B] = InstOffset; diff --git a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp --- a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp @@ -114,11 +114,11 @@ // First pass - compute the offset of each basic block. for (const MachineBasicBlock &MBB : MF) { - if (MBB.getAlignment()) { + if (MBB.getLogAlignment()) { // Although we don't know the exact layout of the final code, we need // to account for alignment padding somehow. This heuristic pads each // aligned basic block according to the alignment value. 
- int ByteAlign = (1u << MBB.getAlignment()) - 1; + int ByteAlign = (1u << MBB.getLogAlignment()) - 1; InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign); } diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -1231,8 +1231,8 @@ auto &HRI = *Subtarget.getRegisterInfo(); setPrefLoopAlignment(4); - setPrefFunctionAlignment(4); - setMinFunctionAlignment(2); + setPrefFunctionLogAlignment(4); + setMinFunctionLogAlignment(2); setStackPointerRegisterToSaveRestore(HRI.getStackRegister()); setBooleanContents(TargetLoweringBase::UndefinedBooleanContent); setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent); diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp --- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp +++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp @@ -145,8 +145,8 @@ setTargetDAGCombine(ISD::XOR); // Function alignments (log2) - setMinFunctionAlignment(2); - setPrefFunctionAlignment(2); + setMinFunctionLogAlignment(2); + setPrefFunctionLogAlignment(2); setJumpIsExpensive(true); diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp --- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -327,8 +327,8 @@ setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN); // TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll - setMinFunctionAlignment(1); - setPrefFunctionAlignment(1); + setMinFunctionLogAlignment(1); + setPrefFunctionLogAlignment(1); } SDValue MSP430TargetLowering::LowerOperation(SDValue Op, diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h @@ -14,7 +14,7 @@ namespace llvm { // Log2 of the NaCl MIPS sandbox's instruction bundle size. -static const unsigned MIPS_NACL_BUNDLE_ALIGN = 4u; +static const unsigned MIPS_NACL_BUNDLE_LOG_ALIGN = 4u; bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx, bool *IsStore = nullptr); diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp @@ -270,7 +270,7 @@ S->getAssembler().setRelaxAll(true); // Set bundle-alignment as required by the NaCl ABI for the target. - S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_ALIGN); + S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_LOG_ALIGN); return S; } diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp --- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp @@ -399,7 +399,7 @@ // NaCl sandboxing requires that indirect call instructions are masked. // This means that function entry points should be bundle-aligned. 
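Both Hexagon passes above account for alignment padding with the same mask arithmetic; as a standalone helper (the name is illustrative):

// Round Offset up to the next multiple of 2^LogAlign.
unsigned roundUpToLogAlign(unsigned Offset, unsigned LogAlign) {
  unsigned Mask = (1u << LogAlign) - 1;   // LogAlign == 4  ->  Mask == 0xF
  return (Offset + Mask) & ~Mask;         // 23 -> 32 when LogAlign == 4
}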
if (Subtarget->isTargetNaCl()) - EmitAlignment(std::max(MF->getAlignment(), MIPS_NACL_BUNDLE_ALIGN)); + EmitAlignment(std::max(MF->getLogAlignment(), MIPS_NACL_BUNDLE_LOG_ALIGN)); if (Subtarget->inMicroMipsMode()) { TS.emitDirectiveSetMicroMips(); @@ -1274,14 +1274,14 @@ const std::vector &MBBs = JT[I].MBBs; for (unsigned J = 0; J < MBBs.size(); ++J) - MBBs[J]->setAlignment(MIPS_NACL_BUNDLE_ALIGN); + MBBs[J]->setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN); } } // If basic block address is taken, block can be target of indirect branch. for (auto &MBB : MF) { if (MBB.hasAddressTaken()) - MBB.setAlignment(MIPS_NACL_BUNDLE_ALIGN); + MBB.setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN); } } diff --git a/llvm/lib/Target/Mips/MipsBranchExpansion.cpp b/llvm/lib/Target/Mips/MipsBranchExpansion.cpp --- a/llvm/lib/Target/Mips/MipsBranchExpansion.cpp +++ b/llvm/lib/Target/Mips/MipsBranchExpansion.cpp @@ -507,7 +507,7 @@ .addImm(0); if (STI->isTargetNaCl()) // Bundle-align the target of indirect branch JR. - TgtMBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN); + TgtMBB->setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN); // In NaCl, modifying the sp is not allowed in branch delay slot. // For MIPS32R6, we can skip using a delay slot branch. diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp --- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -534,21 +534,22 @@ MF->push_back(BB); // MachineConstantPool measures alignment in bytes. We measure in log2(bytes). - unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment()); + unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment()); // Mark the basic block as required by the const-pool. // If AlignConstantIslands isn't set, use 4-byte alignment for everything. - BB->setAlignment(AlignConstantIslands ? MaxAlign : 2); + BB->setLogAlignment(AlignConstantIslands ? MaxLogAlign : 2); // The function needs to be as aligned as the basic blocks. The linker may // move functions around based on their alignment. - MF->ensureAlignment(BB->getAlignment()); + MF->ensureLogAlignment(BB->getLogAlignment()); // Order the entries in BB by descending alignment. That ensures correct // alignment of all entries as long as BB is sufficiently aligned. Keep // track of the insertion point for each alignment. We are going to bucket // sort the entries as they are created. - SmallVector InsPoint(MaxAlign + 1, BB->end()); + SmallVector InsPoint(MaxLogAlign + 1, + BB->end()); // Add all of the constants from the constant pool to the end block, use an // identity mapping of CPI's to CPE's. @@ -576,7 +577,7 @@ // Ensure that future entries with higher alignment get inserted before // CPEMI. This is bucket sort with iterators. - for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a) + for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a) if (InsPoint[a] == InsAt) InsPoint[a] = CPEMI; // Add a new CPEntry, but no corresponding CPUser yet. 
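With MIPS_NACL_BUNDLE_LOG_ALIGN renamed above, the NaCl-specific rule is simply that entry points and indirect-branch targets get at least 2^4 = 16-byte (bundle) alignment. A minimal sketch of that max() rule; the helper name is invented for illustration:

#include <algorithm>

constexpr unsigned BundleLogAlign = 4;          // 1 << 4 == 16-byte NaCl bundles

unsigned naclEntryLogAlign(unsigned FnLogAlign) {
  return std::max(FnLogAlign, BundleLogAlign);  // e.g. 2 -> 4, 5 stays 5
}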
@@ -942,14 +943,14 @@ unsigned &Growth) { unsigned CPELogAlign = getCPELogAlign(*U.CPEMI); unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign); - unsigned NextBlockOffset, NextBlockAlignment; + unsigned NextBlockOffset, NextBlockLogAlignment; MachineFunction::const_iterator NextBlock = ++Water->getIterator(); if (NextBlock == MF->end()) { NextBlockOffset = BBInfo[Water->getNumber()].postOffset(); - NextBlockAlignment = 0; + NextBlockLogAlignment = 0; } else { NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset; - NextBlockAlignment = NextBlock->getAlignment(); + NextBlockLogAlignment = NextBlock->getLogAlignment(); } unsigned Size = U.CPEMI->getOperand(2).getImm(); unsigned CPEEnd = CPEOffset + Size; @@ -961,7 +962,7 @@ Growth = CPEEnd - NextBlockOffset; // Compute the padding that would go at the end of the CPE to align the next // block. - Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment); + Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment); // If the CPE is to be inserted before the instruction, that will raise // the offset of the instruction. Also account for unknown alignment padding @@ -1258,7 +1259,7 @@ // Try to split the block so it's fully aligned. Compute the latest split // point where we can add a 4-byte branch instruction, and then align to // LogAlign which is the largest possible alignment in the function. - unsigned LogAlign = MF->getAlignment(); + unsigned LogAlign = MF->getLogAlignment(); assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry"); unsigned BaseInsertOffset = UserOffset + U.getMaxDisp(); LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x", @@ -1399,7 +1400,7 @@ ++NumCPEs; // Mark the basic block as aligned as required by the const-pool entry. - NewIsland->setAlignment(getCPELogAlign(*U.CPEMI)); + NewIsland->setLogAlignment(getCPELogAlign(*U.CPEMI)); // Increase the size of the island block to account for the new entry. BBInfo[NewIsland->getNumber()].Size += Size; @@ -1431,10 +1432,10 @@ BBInfo[CPEBB->getNumber()].Size = 0; // This block no longer needs to be aligned. - CPEBB->setAlignment(0); + CPEBB->setLogAlignment(0); } else // Entries are sorted by descending alignment, so realign from the front. - CPEBB->setAlignment(getCPELogAlign(*CPEBB->begin())); + CPEBB->setLogAlignment(getCPELogAlign(*CPEBB->begin())); adjustBBOffsetsAfter(CPEBB); // An island has only one predecessor BB and one successor BB. Check if @@ -1529,7 +1530,7 @@ // We should have a way to back out this alignment restriction if we "can" later. // but it is not harmful. // - DestBB->setAlignment(2); + DestBB->setLogAlignment(2); Br.MaxDisp = ((1<<24)-1) * 2; MI->setDesc(TII->get(Mips::JalB16)); } diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -518,7 +518,7 @@ setLibcallName(RTLIB::SRA_I128, nullptr); } - setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2); + setMinFunctionLogAlignment(Subtarget.isGP64bit() ? 3 : 2); // The arguments on the stack are defined in terms of 4-byte slots on O32 // and 8-byte slots on N32/N64. diff --git a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp --- a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp +++ b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp @@ -81,14 +81,14 @@ /// original Offset. 
unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB, unsigned Offset) { - unsigned Align = MBB.getAlignment(); - if (!Align) + unsigned LogAlign = MBB.getLogAlignment(); + if (!LogAlign) return 0; - unsigned AlignAmt = 1 << Align; - unsigned ParentAlign = MBB.getParent()->getAlignment(); + unsigned AlignAmt = 1 << LogAlign; + unsigned ParentLogAlign = MBB.getParent()->getLogAlignment(); - if (Align <= ParentAlign) + if (LogAlign <= ParentLogAlign) return OffsetToAlignment(Offset, AlignAmt); // The alignment of this MBB is larger than the function's alignment, so we @@ -179,21 +179,21 @@ const MachineBasicBlock *Dest, unsigned BrOffset) { int BranchSize; - unsigned MaxAlign = 2; + unsigned MaxLogAlign = 2; bool NeedExtraAdjustment = false; if (Dest->getNumber() <= Src->getNumber()) { // If this is a backwards branch, the delta is the offset from the // start of this block to this branch, plus the sizes of all blocks // from this block to the dest. BranchSize = BrOffset; - MaxAlign = std::max(MaxAlign, Src->getAlignment()); + MaxLogAlign = std::max(MaxLogAlign, Src->getLogAlignment()); int DestBlock = Dest->getNumber(); BranchSize += BlockSizes[DestBlock].first; for (unsigned i = DestBlock+1, e = Src->getNumber(); i < e; ++i) { BranchSize += BlockSizes[i].first; - MaxAlign = std::max(MaxAlign, - Fn.getBlockNumbered(i)->getAlignment()); + MaxLogAlign = + std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment()); } NeedExtraAdjustment = (FirstImpreciseBlock >= 0) && @@ -204,11 +204,11 @@ unsigned StartBlock = Src->getNumber(); BranchSize = BlockSizes[StartBlock].first - BrOffset; - MaxAlign = std::max(MaxAlign, Dest->getAlignment()); + MaxLogAlign = std::max(MaxLogAlign, Dest->getLogAlignment()); for (unsigned i = StartBlock+1, e = Dest->getNumber(); i != e; ++i) { BranchSize += BlockSizes[i].first; - MaxAlign = std::max(MaxAlign, - Fn.getBlockNumbered(i)->getAlignment()); + MaxLogAlign = + std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment()); } NeedExtraAdjustment = (FirstImpreciseBlock >= 0) && @@ -258,7 +258,7 @@ // The computed offset is at most ((1 << alignment) - 4) bytes smaller // than actual offset. So we add this number to the offset for safety. 
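The safety margin described in the comment above is applied on the next line: the block-size table can underestimate the branch distance by up to (1 << MaxLogAlign) - 4 bytes of alignment padding, so that much is added back. As a standalone sketch (the free function is illustrative only):

unsigned worstCaseAlignmentSlack(unsigned MaxLogAlign) {
  return (1u << MaxLogAlign) - 4;   // MaxLogAlign == 4  ->  12 extra bytes
}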
if (NeedExtraAdjustment) - BranchSize += (1 << MaxAlign) - 4; + BranchSize += (1 << MaxLogAlign) - 4; return BranchSize; } diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -735,7 +735,7 @@ const SelectionDAG &DAG, unsigned Depth = 0) const override; - unsigned getPrefLoopAlignment(MachineLoop *ML) const override; + unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override; bool shouldInsertFencesForAtomic(const Instruction *I) const override { return true; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1174,9 +1174,9 @@ setJumpIsExpensive(); } - setMinFunctionAlignment(2); + setMinFunctionLogAlignment(2); if (Subtarget.isDarwin()) - setPrefFunctionAlignment(4); + setPrefFunctionLogAlignment(4); switch (Subtarget.getDarwinDirective()) { default: break; @@ -1193,7 +1193,7 @@ case PPC::DIR_PWR7: case PPC::DIR_PWR8: case PPC::DIR_PWR9: - setPrefFunctionAlignment(4); + setPrefFunctionLogAlignment(4); setPrefLoopAlignment(4); break; } @@ -13975,7 +13975,7 @@ } } -unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { +unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const { switch (Subtarget.getDarwinDirective()) { default: break; case PPC::DIR_970: @@ -14018,7 +14018,7 @@ } } - return TargetLowering::getPrefLoopAlignment(ML); + return TargetLowering::getPrefLoopLogAlignment(ML); } /// getConstraintType - Given a constraint, return the type of diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -199,8 +199,8 @@ // Function alignments (log2). unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2; - setMinFunctionAlignment(FunctionAlignment); - setPrefFunctionAlignment(FunctionAlignment); + setMinFunctionLogAlignment(FunctionAlignment); + setPrefFunctionLogAlignment(FunctionAlignment); // Effectively disable jump table generation. setMinimumJumpTableEntries(INT_MAX); diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp --- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp +++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp @@ -1805,7 +1805,7 @@ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); - setMinFunctionAlignment(2); + setMinFunctionLogAlignment(2); computeRegisterProperties(Subtarget->getRegisterInfo()); } diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -120,9 +120,9 @@ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // Instructions are strings of 2-byte aligned 2-byte values. - setMinFunctionAlignment(2); + setMinFunctionLogAlignment(2); // For performance reasons we prefer 16-byte alignment. - setPrefFunctionAlignment(4); + setPrefFunctionLogAlignment(4); // Handle operations that are handled in a similar way for all types. 
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE; diff --git a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp --- a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp +++ b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp @@ -87,7 +87,7 @@ // The minimum alignment of the block, as a log2 value. // This value never changes. - unsigned Alignment = 0; + unsigned LogAlignment = 0; // The number of terminators in this block. This value never changes. unsigned NumTerminators = 0; @@ -127,7 +127,8 @@ // as the runtime address. unsigned KnownBits; - BlockPosition(unsigned InitialAlignment) : KnownBits(InitialAlignment) {} + BlockPosition(unsigned InitialLogAlignment) + : KnownBits(InitialLogAlignment) {} }; class SystemZLongBranch : public MachineFunctionPass { @@ -178,16 +179,16 @@ // instructions. void SystemZLongBranch::skipNonTerminators(BlockPosition &Position, MBBInfo &Block) { - if (Block.Alignment > Position.KnownBits) { + if (Block.LogAlignment > Position.KnownBits) { // When calculating the address of Block, we need to conservatively // assume that Block had the worst possible misalignment. - Position.Address += ((uint64_t(1) << Block.Alignment) - + Position.Address += ((uint64_t(1) << Block.LogAlignment) - (uint64_t(1) << Position.KnownBits)); - Position.KnownBits = Block.Alignment; + Position.KnownBits = Block.LogAlignment; } // Align the addresses. - uint64_t AlignMask = (uint64_t(1) << Block.Alignment) - 1; + uint64_t AlignMask = (uint64_t(1) << Block.LogAlignment) - 1; Position.Address = (Position.Address + AlignMask) & ~AlignMask; // Record the block's position. @@ -275,13 +276,13 @@ Terminators.clear(); Terminators.reserve(NumBlocks); - BlockPosition Position(MF->getAlignment()); + BlockPosition Position(MF->getLogAlignment()); for (unsigned I = 0; I < NumBlocks; ++I) { MachineBasicBlock *MBB = MF->getBlockNumbered(I); MBBInfo &Block = MBBs[I]; // Record the alignment, for quick access. - Block.Alignment = MBB->getAlignment(); + Block.LogAlignment = MBB->getLogAlignment(); // Calculate the size of the fixed part of the block. MachineBasicBlock::iterator MI = MBB->begin(); @@ -339,7 +340,7 @@ // must be long. void SystemZLongBranch::setWorstCaseAddresses() { SmallVector::iterator TI = Terminators.begin(); - BlockPosition Position(MF->getAlignment()); + BlockPosition Position(MF->getLogAlignment()); for (auto &Block : MBBs) { skipNonTerminators(Position, Block); for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) { @@ -440,7 +441,7 @@ // Run a shortening pass and relax any branches that need to be relaxed. void SystemZLongBranch::relaxBranches() { SmallVector::iterator TI = Terminators.begin(); - BlockPosition Position(MF->getAlignment()); + BlockPosition Position(MF->getLogAlignment()); for (auto &Block : MBBs) { skipNonTerminators(Position, Block); for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) { diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -1876,7 +1876,7 @@ // but a conditional move could be stalled by an expensive earlier operation. PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder(); EnableExtLdPromotion = true; - setPrefFunctionAlignment(4); // 2^4 bytes. + setPrefFunctionLogAlignment(4); // 2^4 bytes. 
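skipNonTerminators above computes worst-case block addresses from a running pair of (address, known low zero bits); the same update as a self-contained helper, with illustrative names:

#include <cstdint>

uint64_t advancePastAlignment(uint64_t Address, unsigned &KnownBits,
                              unsigned LogAlign) {
  if (LogAlign > KnownBits) {
    // Assume the worst possible misalignment for the bits we do not know.
    Address += (uint64_t(1) << LogAlign) - (uint64_t(1) << KnownBits);
    KnownBits = LogAlign;
  }
  uint64_t Mask = (uint64_t(1) << LogAlign) - 1;
  return (Address + Mask) & ~Mask;
}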
verifyIntrinsicTables(); } diff --git a/llvm/lib/Target/X86/X86RetpolineThunks.cpp b/llvm/lib/Target/X86/X86RetpolineThunks.cpp --- a/llvm/lib/Target/X86/X86RetpolineThunks.cpp +++ b/llvm/lib/Target/X86/X86RetpolineThunks.cpp @@ -279,7 +279,7 @@ CallTarget->addLiveIn(Reg); CallTarget->setHasAddressTaken(); - CallTarget->setAlignment(4); + CallTarget->setLogAlignment(4); insertRegReturnAddrClobber(*CallTarget, Reg); CallTarget->back().setPreInstrSymbol(MF, TargetSym); BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc)); diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp --- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp +++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp @@ -171,8 +171,8 @@ setTargetDAGCombine(ISD::INTRINSIC_VOID); setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); - setMinFunctionAlignment(1); - setPrefFunctionAlignment(2); + setMinFunctionLogAlignment(1); + setPrefFunctionLogAlignment(2); } bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
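As a closing recap, the renamed MachineFunction interface from the hunks above can be exercised as follows; every value is log2(bytes), and the snippet is illustrative usage rather than code from the patch:

#include "llvm/CodeGen/MachineFunction.h"

void example(llvm::MachineFunction &MF) {
  MF.setLogAlignment(4);                         // request a 2^4 = 16-byte boundary
  MF.ensureLogAlignment(2);                      // no-op: 16 bytes already covers 4
  unsigned Bytes = 1u << MF.getLogAlignment();   // back to bytes: 16
  (void)Bytes;
}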