diff --git a/llvm/docs/MIRLangRef.rst b/llvm/docs/MIRLangRef.rst
--- a/llvm/docs/MIRLangRef.rst
+++ b/llvm/docs/MIRLangRef.rst
@@ -343,6 +343,8 @@
 .. TODO: Describe the way the reference to an unnamed LLVM IR block can be
    preserved.
 
+``Alignment`` is specified in bytes, and must be a power of two.
+
 Machine Instructions
 --------------------
 
@@ -614,9 +616,13 @@
   alignment:
   isTargetSpecific:
 
-where ```` is a 32-bit unsigned integer, ```` is a `LLVM IR Constant
-`_, alignment is a 32-bit
-unsigned integer, and ```` is either true or false.
+where:
+  - ```` is a 32-bit unsigned integer;
+  - ```` is a `LLVM IR Constant
+    `_;
+  - ```` is a 32-bit unsigned integer specified in bytes, and must be
+    a power of two;
+  - ```` is either true or false.
 
 Example:
 
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -105,7 +105,7 @@
   /// Alignment of the basic block. Zero if the basic block does not need to be
   /// aligned. The alignment is specified as log2(bytes).
-  unsigned Alignment = 0;
+  unsigned LogAlignment = 0;
 
   /// Indicate that this basic block is entered via an exception handler.
   bool IsEHPad = false;
@@ -374,11 +374,11 @@
   /// Return alignment of the basic block. The alignment is specified as
   /// log2(bytes).
-  unsigned getAlignment() const { return Alignment; }
+  unsigned getLogAlignment() const { return LogAlignment; }
 
   /// Set alignment of the basic block. The alignment is specified as
   /// log2(bytes).
-  void setAlignment(unsigned Align) { Alignment = Align; }
+  void setLogAlignment(unsigned A) { LogAlignment = A; }
 
   /// Returns true if the block is a landing pad. That is this basic block is
   /// entered via an exception handler.
diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -277,7 +277,7 @@
   unsigned FunctionNumber;
 
   /// Alignment - The alignment of the function.
-  unsigned Alignment;
+  unsigned LogAlignment;
 
   /// ExposesReturnsTwice - True if the function calls setjmp or related
   /// functions with attribute "returns twice", but doesn't have
@@ -508,15 +508,16 @@
   const WinEHFuncInfo *getWinEHFuncInfo() const { return WinEHInfo; }
   WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
 
-  /// getAlignment - Return the alignment (log2, not bytes) of the function.
-  unsigned getAlignment() const { return Alignment; }
+  /// getLogAlignment - Return the alignment of the function.
+  unsigned getLogAlignment() const { return LogAlignment; }
 
-  /// setAlignment - Set the alignment (log2, not bytes) of the function.
-  void setAlignment(unsigned A) { Alignment = A; }
+  /// setLogAlignment - Set the alignment of the function.
+  void setLogAlignment(unsigned A) { LogAlignment = A; }
 
   /// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
-  void ensureAlignment(unsigned A) {
-    if (Alignment < A) Alignment = A;
+  void ensureLogAlignment(unsigned A) {
+    if (LogAlignment < A)
+      LogAlignment = A;
   }
 
   /// exposesReturnsTwice - Returns true if the function calls setjmp or
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1582,18 +1582,18 @@
   }
 
   /// Return the minimum function alignment.
-  unsigned getMinFunctionAlignment() const {
-    return MinFunctionAlignment;
+  unsigned getMinFunctionLogAlignment() const {
+    return MinFunctionLogAlignment;
   }
 
   /// Return the preferred function alignment.
-  unsigned getPrefFunctionAlignment() const {
-    return PrefFunctionAlignment;
+  unsigned getPrefFunctionLogAlignment() const {
+    return PrefFunctionLogAlignment;
   }
 
   /// Return the preferred loop alignment.
-  virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
-    return PrefLoopAlignment;
+  virtual unsigned getPrefLoopLogAlignment(MachineLoop *ML = nullptr) const {
+    return PrefLoopLogAlignment;
   }
 
   /// Should loops be aligned even when the function is marked OptSize (but not
@@ -2105,23 +2105,23 @@
   }
 
   /// Set the target's minimum function alignment (in log2(bytes))
-  void setMinFunctionAlignment(unsigned Align) {
-    MinFunctionAlignment = Align;
+  void setMinFunctionLogAlignment(unsigned LogAlign) {
+    MinFunctionLogAlignment = LogAlign;
   }
 
   /// Set the target's preferred function alignment. This should be set if
   /// there is a performance benefit to higher-than-minimum alignment (in
   /// log2(bytes))
-  void setPrefFunctionAlignment(unsigned Align) {
-    PrefFunctionAlignment = Align;
+  void setPrefFunctionLogAlignment(unsigned LogAlign) {
+    PrefFunctionLogAlignment = LogAlign;
   }
 
   /// Set the target's preferred loop alignment. Default alignment is zero, it
   /// means the target does not care about loop alignment. The alignment is
   /// specified in log2(bytes). The target may also override
   /// getPrefLoopAlignment to provide per-loop values.
-  void setPrefLoopAlignment(unsigned Align) {
-    PrefLoopAlignment = Align;
+  void setPrefLoopLogAlignment(unsigned LogAlign) {
+    PrefLoopLogAlignment = LogAlign;
   }
 
   /// Set the minimum stack alignment of an argument (in log2(bytes)).
@@ -2692,14 +2692,14 @@
   }
 
   /// The minimum function alignment (used when optimizing for size, and to
   /// prevent explicitly provided alignment from leading to incorrect code).
-  unsigned MinFunctionAlignment;
+  unsigned MinFunctionLogAlignment;
 
   /// The preferred function alignment (used when alignment unspecified and
   /// optimizing for speed).
-  unsigned PrefFunctionAlignment;
+  unsigned PrefFunctionLogAlignment;
 
-  /// The preferred loop alignment.
-  unsigned PrefLoopAlignment;
+  /// The preferred loop alignment (in log2, not in bytes).
+  unsigned PrefLoopLogAlignment;
 
   /// Size in bits of the maximum atomics size the backend supports.
   /// Accesses larger than this will be expanded by AtomicExpandPass.
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -667,7 +667,7 @@
   EmitLinkage(&F, CurrentFnSym);
   if (MAI->hasFunctionAlignment())
-    EmitAlignment(MF->getAlignment(), &F);
+    EmitAlignment(MF->getLogAlignment(), &F);
 
   if (MAI->hasDotTypeDotSizeDirective())
     OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);
@@ -2905,8 +2905,8 @@
   }
 
   // Emit an alignment directive for this block, if needed.
- if (unsigned Align = MBB.getAlignment()) - EmitAlignment(Align); + if (unsigned LogAlign = MBB.getLogAlignment()) + EmitAlignment(LogAlign); MCCodePaddingContext Context; setupCodePaddingContext(MBB, Context); OutStreamer->EmitCodePaddingBasicBlockStart(Context); diff --git a/llvm/lib/CodeGen/AsmPrinter/WinException.cpp b/llvm/lib/CodeGen/AsmPrinter/WinException.cpp --- a/llvm/lib/CodeGen/AsmPrinter/WinException.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/WinException.cpp @@ -203,8 +203,8 @@ // We want our funclet's entry point to be aligned such that no nops will be // present after the label. - Asm->EmitAlignment(std::max(Asm->MF->getAlignment(), MBB.getAlignment()), - &F); + Asm->EmitAlignment( + std::max(Asm->MF->getLogAlignment(), MBB.getLogAlignment()), &F); // Now that we've emitted the alignment directive, point at our funclet. Asm->OutStreamer->EmitLabel(Sym); diff --git a/llvm/lib/CodeGen/BranchRelaxation.cpp b/llvm/lib/CodeGen/BranchRelaxation.cpp --- a/llvm/lib/CodeGen/BranchRelaxation.cpp +++ b/llvm/lib/CodeGen/BranchRelaxation.cpp @@ -65,13 +65,13 @@ /// block. unsigned postOffset(const MachineBasicBlock &MBB) const { unsigned PO = Offset + Size; - unsigned Align = MBB.getAlignment(); - if (Align == 0) + unsigned LogAlign = MBB.getLogAlignment(); + if (LogAlign == 0) return PO; - unsigned AlignAmt = 1 << Align; - unsigned ParentAlign = MBB.getParent()->getAlignment(); - if (Align <= ParentAlign) + unsigned AlignAmt = 1 << LogAlign; + unsigned ParentLogAlign = MBB.getParent()->getLogAlignment(); + if (LogAlign <= ParentLogAlign) return PO + OffsetToAlignment(PO, AlignAmt); // The alignment of this MBB is larger than the function's alignment, so we @@ -128,9 +128,9 @@ #ifndef NDEBUG unsigned PrevNum = MF->begin()->getNumber(); for (MachineBasicBlock &MBB : *MF) { - unsigned Align = MBB.getAlignment(); + unsigned LogAlign = MBB.getLogAlignment(); unsigned Num = MBB.getNumber(); - assert(BlockInfo[Num].Offset % (1u << Align) == 0); + assert(BlockInfo[Num].Offset % (1u << LogAlign) == 0); assert(!Num || BlockInfo[PrevNum].postOffset(MBB) <= BlockInfo[Num].Offset); assert(BlockInfo[Num].Size == computeBlockSize(MBB)); PrevNum = Num; diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp --- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp @@ -641,7 +641,7 @@ return error(Loc, Twine("redefinition of machine basic block with id #") + Twine(ID)); if (Alignment) - MBB->setAlignment(Alignment); + MBB->setLogAlignment(Log2_32(Alignment)); if (HasAddressTaken) MBB->setHasAddressTaken(); MBB->setIsEHPad(IsLandingPad); diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp --- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp +++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp @@ -393,7 +393,7 @@ } if (YamlMF.Alignment) - MF.setAlignment(YamlMF.Alignment); + MF.setLogAlignment(Log2_32(YamlMF.Alignment)); MF.setExposesReturnsTwice(YamlMF.ExposesReturnsTwice); MF.setHasWinCFI(YamlMF.HasWinCFI); diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp --- a/llvm/lib/CodeGen/MIRPrinter.cpp +++ b/llvm/lib/CodeGen/MIRPrinter.cpp @@ -197,7 +197,7 @@ yaml::MachineFunction YamlMF; YamlMF.Name = MF.getName(); - YamlMF.Alignment = MF.getAlignment(); + YamlMF.Alignment = 1UL << MF.getLogAlignment(); YamlMF.ExposesReturnsTwice = MF.exposesReturnsTwice(); YamlMF.HasWinCFI = MF.hasWinCFI(); @@ -629,9 +629,10 @@ OS << "landing-pad"; HasAttributes = true; } - if (MBB.getAlignment()) 
{
+  if (MBB.getLogAlignment()) {
     OS << (HasAttributes ? ", " : " (");
-    OS << "align " << MBB.getAlignment();
+    OS << "align "
+       << (1UL << MBB.getLogAlignment());
     HasAttributes = true;
   }
   if (HasAttributes)
diff --git a/llvm/lib/CodeGen/MachineBasicBlock.cpp b/llvm/lib/CodeGen/MachineBasicBlock.cpp
--- a/llvm/lib/CodeGen/MachineBasicBlock.cpp
+++ b/llvm/lib/CodeGen/MachineBasicBlock.cpp
@@ -326,9 +326,9 @@
     OS << "landing-pad";
     HasAttributes = true;
   }
-  if (getAlignment()) {
+  if (getLogAlignment()) {
     OS << (HasAttributes ? ", " : " (");
-    OS << "align " << getAlignment();
+    OS << "align " << getLogAlignment();
     HasAttributes = true;
   }
   if (HasAttributes)
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -79,16 +79,17 @@
 STATISTIC(UncondBranchTakenFreq,
           "Potential frequency of taking unconditional branches");
 
-static cl::opt AlignAllBlock("align-all-blocks",
-                             cl::desc("Force the alignment of all "
-                                      "blocks in the function."),
-                             cl::init(0), cl::Hidden);
+static cl::opt AlignAllBlock(
+    "align-all-blocks",
+    cl::desc("Force the alignment of all blocks in the function in log2 format "
+             "(e.g. 4 means align on 16B boundaries)."),
+    cl::init(0), cl::Hidden);
 
 static cl::opt AlignAllNonFallThruBlocks(
     "align-all-nofallthru-blocks",
-    cl::desc("Force the alignment of all "
-             "blocks that have no fall-through predecessors (i.e. don't add "
-             "nops that are executed)."),
+    cl::desc("Force the alignment of all blocks that have no fall-through "
+             "predecessors (i.e. don't add nops that are executed). In log2 "
+             "format (e.g. 4 means align on 16B boundaries)."),
     cl::init(0), cl::Hidden);
 
 // FIXME: Find a good default for this flag and remove the flag.
@@ -2763,8 +2764,8 @@
     if (!L)
       continue;
 
-    unsigned Align = TLI->getPrefLoopAlignment(L);
-    if (!Align)
+    unsigned LogAlign = TLI->getPrefLoopLogAlignment(L);
+    if (!LogAlign)
       continue; // Don't care about loop alignment.
 
     // If the block is cold relative to the function entry don't waste space
@@ -2788,7 +2789,7 @@
     // Force alignment if all the predecessors are jumps. We already checked
     // that the block isn't cold above.
     if (!LayoutPred->isSuccessor(ChainBB)) {
-      ChainBB->setAlignment(Align);
+      ChainBB->setLogAlignment(LogAlign);
       continue;
     }
 
@@ -2800,7 +2801,7 @@
         MBPI->getEdgeProbability(LayoutPred, ChainBB);
     BlockFrequency LayoutEdgeFreq = MBFI->getBlockFreq(LayoutPred) * LayoutProb;
     if (LayoutEdgeFreq <= (Freq * ColdProb))
-      ChainBB->setAlignment(Align);
+      ChainBB->setLogAlignment(LogAlign);
   }
 }
 
@@ -3062,14 +3063,14 @@
   if (AlignAllBlock)
     // Align all of the blocks in the function to a specific alignment.
     for (MachineBasicBlock &MBB : MF)
-      MBB.setAlignment(AlignAllBlock);
+      MBB.setLogAlignment(AlignAllBlock);
   else if (AlignAllNonFallThruBlocks) {
     // Align all of the blocks that have no fall-through predecessors to a
    // specific alignment.
for (auto MBI = std::next(MF.begin()), MBE = MF.end(); MBI != MBE; ++MBI) { auto LayoutPred = std::prev(MBI); if (!LayoutPred->isSuccessor(&*MBI)) - MBI->setAlignment(AlignAllNonFallThruBlocks); + MBI->setLogAlignment(AlignAllNonFallThruBlocks); } } if (ViewBlockLayoutWithBFI != GVDT_None && diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp --- a/llvm/lib/CodeGen/MachineFunction.cpp +++ b/llvm/lib/CodeGen/MachineFunction.cpp @@ -78,10 +78,11 @@ #define DEBUG_TYPE "codegen" -static cl::opt -AlignAllFunctions("align-all-functions", - cl::desc("Force the alignment of all functions."), - cl::init(0), cl::Hidden); +static cl::opt AlignAllFunctions( + "align-all-functions", + cl::desc("Force the alignment of all functions in log2 format (e.g. 4 " + "means align on 16B boundaries)."), + cl::init(0), cl::Hidden); static const char *getPropertyName(MachineFunctionProperties::Property Prop) { using P = MachineFunctionProperties::Property; @@ -172,16 +173,16 @@ FrameInfo->ensureMaxAlignment(F.getFnStackAlignment()); ConstantPool = new (Allocator) MachineConstantPool(getDataLayout()); - Alignment = STI->getTargetLowering()->getMinFunctionAlignment(); + LogAlignment = STI->getTargetLowering()->getMinFunctionLogAlignment(); // FIXME: Shouldn't use pref alignment if explicit alignment is set on F. // FIXME: Use Function::hasOptSize(). if (!F.hasFnAttribute(Attribute::OptimizeForSize)) - Alignment = std::max(Alignment, - STI->getTargetLowering()->getPrefFunctionAlignment()); + LogAlignment = std::max( + LogAlignment, STI->getTargetLowering()->getPrefFunctionLogAlignment()); if (AlignAllFunctions) - Alignment = AlignAllFunctions; + LogAlignment = AlignAllFunctions; JumpTableInfo = nullptr; diff --git a/llvm/lib/CodeGen/PatchableFunction.cpp b/llvm/lib/CodeGen/PatchableFunction.cpp --- a/llvm/lib/CodeGen/PatchableFunction.cpp +++ b/llvm/lib/CodeGen/PatchableFunction.cpp @@ -78,7 +78,7 @@ MIB.add(MO); FirstActualI->eraseFromParent(); - MF.ensureAlignment(4); + MF.ensureLogAlignment(4); return true; } diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -583,9 +583,9 @@ BooleanFloatContents = UndefinedBooleanContent; BooleanVectorContents = UndefinedBooleanContent; SchedPreferenceInfo = Sched::ILP; - MinFunctionAlignment = 0; - PrefFunctionAlignment = 0; - PrefLoopAlignment = 0; + MinFunctionLogAlignment = 0; + PrefFunctionLogAlignment = 0; + PrefLoopLogAlignment = 0; GatherAllAliasesMaxDepth = 18; MinStackArgumentAlignment = 1; // TODO: the default will be switched to 0 in the next commit, along diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -640,10 +640,10 @@ EnableExtLdPromotion = true; // Set required alignment. - setMinFunctionAlignment(2); + setMinFunctionLogAlignment(2); // Set preferred alignments. - setPrefFunctionAlignment(STI.getPrefFunctionAlignment()); - setPrefLoopAlignment(STI.getPrefLoopAlignment()); + setPrefFunctionLogAlignment(STI.getPrefFunctionLogAlignment()); + setPrefLoopLogAlignment(STI.getPrefLoopLogAlignment()); // Only change the limit for entries in a jump table if specified by // the sub target, but not at the command line. 
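
For reference, the MIRParser/MIRPrinter hunks above keep the textual MIR format in bytes while the in-memory value is now log2(bytes). A minimal standalone sketch of that conversion, not part of this patch (the helper names are illustrative; Log2_32 and isPowerOf2_32 are the llvm/Support/MathExtras.h functions the patch uses):

    // Standalone sketch (not patch code) of the byte <-> log2 conversion
    // performed by the MIRParser/MIRPrinter changes above.
    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    // MIR/YAML specifies alignment in bytes (a power of two); the parser
    // stores it as log2(bytes), e.g. 16 -> 4.
    static unsigned bytesToLogAlignment(unsigned AlignInBytes) {
      assert(llvm::isPowerOf2_32(AlignInBytes) && "alignment must be a power of two");
      return llvm::Log2_32(AlignInBytes);
    }

    // The printer converts back to bytes, e.g. setPrefFunctionLogAlignment(4)
    // corresponds to a 16-byte preferred function alignment.
    static unsigned logAlignmentToBytes(unsigned LogAlign) {
      return 1u << LogAlign;
    }
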
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h --- a/llvm/lib/Target/AArch64/AArch64Subtarget.h +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h @@ -198,8 +198,8 @@ uint16_t PrefetchDistance = 0; uint16_t MinPrefetchStride = 1; unsigned MaxPrefetchIterationsAhead = UINT_MAX; - unsigned PrefFunctionAlignment = 0; - unsigned PrefLoopAlignment = 0; + unsigned PrefFunctionLogAlignment = 0; + unsigned PrefLoopLogAlignment = 0; unsigned MaxJumpTableSize = 0; unsigned WideningBaseCost = 0; @@ -359,8 +359,10 @@ unsigned getMaxPrefetchIterationsAhead() const { return MaxPrefetchIterationsAhead; } - unsigned getPrefFunctionAlignment() const { return PrefFunctionAlignment; } - unsigned getPrefLoopAlignment() const { return PrefLoopAlignment; } + unsigned getPrefFunctionLogAlignment() const { + return PrefFunctionLogAlignment; + } + unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; } unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; } diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp --- a/llvm/lib/Target/AArch64/AArch64Subtarget.cpp +++ b/llvm/lib/Target/AArch64/AArch64Subtarget.cpp @@ -71,22 +71,22 @@ case CortexA35: break; case CortexA53: - PrefFunctionAlignment = 3; + PrefFunctionLogAlignment = 3; break; case CortexA55: break; case CortexA57: MaxInterleaveFactor = 4; - PrefFunctionAlignment = 4; + PrefFunctionLogAlignment = 4; break; case CortexA65: - PrefFunctionAlignment = 3; + PrefFunctionLogAlignment = 3; break; case CortexA72: case CortexA73: case CortexA75: case CortexA76: - PrefFunctionAlignment = 4; + PrefFunctionLogAlignment = 4; break; case Cyclone: CacheLineSize = 64; @@ -97,14 +97,14 @@ case ExynosM1: MaxInterleaveFactor = 4; MaxJumpTableSize = 8; - PrefFunctionAlignment = 4; - PrefLoopAlignment = 3; + PrefFunctionLogAlignment = 4; + PrefLoopLogAlignment = 3; break; case ExynosM3: MaxInterleaveFactor = 4; MaxJumpTableSize = 20; - PrefFunctionAlignment = 5; - PrefLoopAlignment = 4; + PrefFunctionLogAlignment = 5; + PrefLoopLogAlignment = 4; break; case Falkor: MaxInterleaveFactor = 4; @@ -126,10 +126,10 @@ MinVectorRegisterBitWidth = 128; break; case NeoverseE1: - PrefFunctionAlignment = 3; + PrefFunctionLogAlignment = 3; break; case NeoverseN1: - PrefFunctionAlignment = 4; + PrefFunctionLogAlignment = 4; break; case Saphira: MaxInterleaveFactor = 4; @@ -138,8 +138,8 @@ break; case ThunderX2T99: CacheLineSize = 64; - PrefFunctionAlignment = 3; - PrefLoopAlignment = 2; + PrefFunctionLogAlignment = 3; + PrefLoopLogAlignment = 2; MaxInterleaveFactor = 4; PrefetchDistance = 128; MinPrefetchStride = 1024; @@ -152,15 +152,15 @@ case ThunderXT81: case ThunderXT83: CacheLineSize = 128; - PrefFunctionAlignment = 3; - PrefLoopAlignment = 2; + PrefFunctionLogAlignment = 3; + PrefLoopLogAlignment = 2; // FIXME: remove this to enable 64-bit SLP if performance looks good. MinVectorRegisterBitWidth = 128; break; case TSV110: CacheLineSize = 64; - PrefFunctionAlignment = 4; - PrefLoopAlignment = 2; + PrefFunctionLogAlignment = 4; + PrefLoopLogAlignment = 2; break; } } diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -417,7 +417,7 @@ // The starting address of all shader programs must be 256 bytes aligned. // Regular functions just need the basic required instruction alignment. 
- MF.setAlignment(MFI->isEntryFunction() ? 8 : 2); + MF.setLogAlignment(MFI->isEntryFunction() ? 8 : 2); SetupMachineFunction(MF); diff --git a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp --- a/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/R600AsmPrinter.cpp @@ -104,7 +104,7 @@ // Functions needs to be cacheline (256B) aligned. - MF.ensureAlignment(8); + MF.ensureLogAlignment(8); SetupMachineFunction(MF); diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h --- a/llvm/lib/Target/AMDGPU/SIISelLowering.h +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h @@ -379,8 +379,7 @@ unsigned Depth = 0) const override; AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override; - unsigned getPrefLoopAlignment(MachineLoop *ML) const override; - + unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override; void allocateHSAUserSGPRs(CCState &CCInfo, MachineFunction &MF, diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -10681,15 +10681,15 @@ Known.Zero.setHighBits(getSubtarget()->getKnownHighZeroBitsForFrameIndex()); } -unsigned SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { - const unsigned PrefAlign = TargetLowering::getPrefLoopAlignment(ML); - const unsigned CacheLineAlign = 6; // log2(64) +unsigned SITargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const { + const unsigned PrefLogAlign = TargetLowering::getPrefLoopLogAlignment(ML); + const unsigned CacheLineLogAlign = 6; // log2(64) // Pre-GFX10 target did not benefit from loop alignment if (!ML || DisableLoopAlignment || (getSubtarget()->getGeneration() < AMDGPUSubtarget::GFX10) || getSubtarget()->hasInstFwdPrefetchBug()) - return PrefAlign; + return PrefLogAlign; // On GFX10 I$ is 4 x 64 bytes cache lines. // By default prefetcher keeps one cache line behind and reads two ahead. @@ -10703,28 +10703,28 @@ const SIInstrInfo *TII = getSubtarget()->getInstrInfo(); const MachineBasicBlock *Header = ML->getHeader(); - if (Header->getAlignment() != PrefAlign) - return Header->getAlignment(); // Already processed. + if (Header->getLogAlignment() != PrefLogAlign) + return Header->getLogAlignment(); // Already processed. unsigned LoopSize = 0; for (const MachineBasicBlock *MBB : ML->blocks()) { // If inner loop block is aligned assume in average half of the alignment // size to be added as nops. if (MBB != Header) - LoopSize += (1 << MBB->getAlignment()) / 2; + LoopSize += (1 << MBB->getLogAlignment()) / 2; for (const MachineInstr &MI : *MBB) { LoopSize += TII->getInstSizeInBytes(MI); if (LoopSize > 192) - return PrefAlign; + return PrefLogAlign; } } if (LoopSize <= 64) - return PrefAlign; + return PrefLogAlign; if (LoopSize <= 128) - return CacheLineAlign; + return CacheLineLogAlign; // If any of parent loops is surrounded by prefetch instructions do not // insert new for inner loop, which would reset parent's settings. 
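
The size thresholds in the hunk above can be summarized by a small standalone helper (illustration only, not code from this patch; pickLoopLogAlignment is a hypothetical name), assuming the 64-byte cache line and prefetch window described in the comments:

    // Illustration of the GFX10 loop-alignment heuristic implemented above:
    // pick a log2 alignment from the estimated loop size in bytes.
    static unsigned pickLoopLogAlignment(unsigned LoopSizeInBytes,
                                         unsigned PrefLogAlign) {
      const unsigned CacheLineLogAlign = 6;            // log2(64)
      if (LoopSizeInBytes <= 64 || LoopSizeInBytes > 192)
        return PrefLogAlign;                           // tiny or too large: keep default
      if (LoopSizeInBytes <= 128)
        return CacheLineLogAlign;                      // fits the prefetched lines: 64B
      // 128 < size <= 192: the patch also emits S_INST_PREFETCH around the
      // loop and still aligns the header to a cache line.
      return CacheLineLogAlign;
    }
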
@@ -10732,7 +10732,7 @@ if (MachineBasicBlock *Exit = P->getExitBlock()) { auto I = Exit->getFirstNonDebugInstr(); if (I != Exit->end() && I->getOpcode() == AMDGPU::S_INST_PREFETCH) - return CacheLineAlign; + return CacheLineLogAlign; } } @@ -10749,7 +10749,7 @@ .addImm(2); // prefetch 1 line behind PC } - return CacheLineAlign; + return CacheLineLogAlign; } LLVM_ATTRIBUTE_UNUSED diff --git a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h --- a/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h +++ b/llvm/lib/Target/ARC/ARCMachineFunctionInfo.h @@ -35,7 +35,7 @@ : ReturnStackOffsetSet(false), VarArgsFrameIndex(0), ReturnStackOffset(-1U), MaxCallStackReq(0) { // Functions are 4-byte (2**2) aligned. - MF.setAlignment(2); + MF.setLogAlignment(2); } ~ARCFunctionInfo() {} diff --git a/llvm/lib/Target/ARM/ARM.td b/llvm/lib/Target/ARM/ARM.td --- a/llvm/lib/Target/ARM/ARM.td +++ b/llvm/lib/Target/ARM/ARM.td @@ -302,7 +302,7 @@ def FeaturePref32BitThumb : SubtargetFeature<"32bit", "Pref32BitThumb", "true", "Prefer 32-bit Thumb instrs">; -def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopAlignment","2", +def FeaturePrefLoopAlign32 : SubtargetFeature<"loop-align", "PrefLoopLogAlignment","2", "Prefer 32-bit alignment for loops">; def FeatureMVEVectorCostFactor1 : SubtargetFeature<"mve1beat", "MVEVectorCostFactor", "1", diff --git a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp --- a/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBasicBlockInfo.cpp @@ -63,7 +63,7 @@ // tBR_JTr contains a .align 2 directive. if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) { BBI.PostAlign = 2; - MBB->getParent()->ensureAlignment(2); + MBB->getParent()->ensureLogAlignment(2); } } @@ -126,7 +126,7 @@ for(unsigned i = BBNum + 1, e = MF.getNumBlockIDs(); i < e; ++i) { // Get the offset and known bits at the end of the layout predecessor. // Include the alignment of the current block. - unsigned LogAlign = MF.getBlockNumbered(i)->getAlignment(); + unsigned LogAlign = MF.getBlockNumbered(i)->getLogAlignment(); unsigned Offset = BBInfo[i - 1].postOffset(LogAlign); unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign); diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp --- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -396,7 +396,7 @@ // Functions with jump tables need an alignment of 4 because they use the ADR // instruction, which aligns the PC to 4 bytes before adding an offset. if (!T2JumpTables.empty()) - MF->ensureAlignment(2); + MF->ensureLogAlignment(2); /// Remove dead constant pool entries. MadeChange |= removeUnusedCPEntries(); @@ -486,20 +486,21 @@ MF->push_back(BB); // MachineConstantPool measures alignment in bytes. We measure in log2(bytes). - unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment()); + unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment()); // Mark the basic block as required by the const-pool. - BB->setAlignment(MaxAlign); + BB->setLogAlignment(MaxLogAlign); // The function needs to be as aligned as the basic blocks. The linker may // move functions around based on their alignment. - MF->ensureAlignment(BB->getAlignment()); + MF->ensureLogAlignment(BB->getLogAlignment()); // Order the entries in BB by descending alignment. That ensures correct // alignment of all entries as long as BB is sufficiently aligned. 
Keep // track of the insertion point for each alignment. We are going to bucket // sort the entries as they are created. - SmallVector InsPoint(MaxAlign + 1, BB->end()); + SmallVector InsPoint(MaxLogAlign + 1, + BB->end()); // Add all of the constants from the constant pool to the end block, use an // identity mapping of CPI's to CPE's. @@ -524,7 +525,7 @@ // Ensure that future entries with higher alignment get inserted before // CPEMI. This is bucket sort with iterators. - for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a) + for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a) if (InsPoint[a] == InsAt) InsPoint[a] = CPEMI; @@ -685,7 +686,7 @@ BBInfoVector &BBInfo = BBUtils->getBBInfo(); // The known bits of the entry block offset are determined by the function // alignment. - BBInfo.front().KnownBits = MF->getAlignment(); + BBInfo.front().KnownBits = MF->getLogAlignment(); // Compute block offsets and known bits. BBUtils->adjustBBOffsetsAfter(&MF->front()); @@ -1015,14 +1016,14 @@ BBInfoVector &BBInfo = BBUtils->getBBInfo(); unsigned CPELogAlign = getCPELogAlign(U.CPEMI); unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign); - unsigned NextBlockOffset, NextBlockAlignment; + unsigned NextBlockOffset, NextBlockLogAlignment; MachineFunction::const_iterator NextBlock = Water->getIterator(); if (++NextBlock == MF->end()) { NextBlockOffset = BBInfo[Water->getNumber()].postOffset(); - NextBlockAlignment = 0; + NextBlockLogAlignment = 0; } else { NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset; - NextBlockAlignment = NextBlock->getAlignment(); + NextBlockLogAlignment = NextBlock->getLogAlignment(); } unsigned Size = U.CPEMI->getOperand(2).getImm(); unsigned CPEEnd = CPEOffset + Size; @@ -1034,13 +1035,13 @@ Growth = CPEEnd - NextBlockOffset; // Compute the padding that would go at the end of the CPE to align the next // block. - Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment); + Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment); // If the CPE is to be inserted before the instruction, that will raise // the offset of the instruction. Also account for unknown alignment padding // in blocks between CPE and the user. if (CPEOffset < UserOffset) - UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign); + UserOffset += Growth + UnknownPadding(MF->getLogAlignment(), CPELogAlign); } else // CPE fits in existing padding. Growth = 0; @@ -1315,7 +1316,7 @@ // Try to split the block so it's fully aligned. Compute the latest split // point where we can add a 4-byte branch instruction, and then align to // LogAlign which is the largest possible alignment in the function. - unsigned LogAlign = MF->getAlignment(); + unsigned LogAlign = MF->getLogAlignment(); assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry"); unsigned KnownBits = UserBBI.internalKnownBits(); unsigned UPad = UnknownPadding(LogAlign, KnownBits); @@ -1493,9 +1494,9 @@ // Always align the new block because CP entries can be smaller than 4 // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may // be an already aligned constant pool block. - const unsigned Align = isThumb ? 1 : 2; - if (NewMBB->getAlignment() < Align) - NewMBB->setAlignment(Align); + const unsigned LogAlign = isThumb ? 1 : 2; + if (NewMBB->getLogAlignment() < LogAlign) + NewMBB->setLogAlignment(LogAlign); // Remove the original WaterList entry; we want subsequent insertions in // this vicinity to go after the one we're about to insert. 
This @@ -1524,7 +1525,7 @@ decrementCPEReferenceCount(CPI, CPEMI); // Mark the basic block as aligned as required by the const-pool entry. - NewIsland->setAlignment(getCPELogAlign(U.CPEMI)); + NewIsland->setLogAlignment(getCPELogAlign(U.CPEMI)); // Increase the size of the island block to account for the new entry. BBUtils->adjustBBSize(NewIsland, Size); @@ -1558,10 +1559,10 @@ BBInfo[CPEBB->getNumber()].Size = 0; // This block no longer needs to be aligned. - CPEBB->setAlignment(0); + CPEBB->setLogAlignment(0); } else // Entries are sorted by descending alignment, so realign from the front. - CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin())); + CPEBB->setLogAlignment(getCPELogAlign(&*CPEBB->begin())); BBUtils->adjustBBOffsetsAfter(CPEBB); // An island has only one predecessor BB and one successor BB. Check if diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -1419,9 +1419,9 @@ // Prefer likely predicted branches to selects on out-of-order cores. PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder(); - setPrefLoopAlignment(Subtarget->getPrefLoopAlignment()); + setPrefLoopLogAlignment(Subtarget->getPrefLoopLogAlignment()); - setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2); + setMinFunctionLogAlignment(Subtarget->isThumb() ? 1 : 2); if (Subtarget->isThumb() || Subtarget->isThumb2()) setTargetDAGCombine(ISD::ABS); diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h --- a/llvm/lib/Target/ARM/ARMSubtarget.h +++ b/llvm/lib/Target/ARM/ARMSubtarget.h @@ -470,7 +470,7 @@ int PreISelOperandLatencyAdjustment = 2; /// What alignment is preferred for loop bodies, in log2(bytes). - unsigned PrefLoopAlignment = 0; + unsigned PrefLoopLogAlignment = 0; /// The cost factor for MVE instructions, representing the multiple beats an // instruction can take. 
The default is 2, (set in initSubtargetFeatures so @@ -859,9 +859,7 @@ return isROPI() || !isTargetELF(); } - unsigned getPrefLoopAlignment() const { - return PrefLoopAlignment; - } + unsigned getPrefLoopLogAlignment() const { return PrefLoopLogAlignment; } unsigned getMVEVectorCostFactor() const { return MVEVectorCostFactor; } diff --git a/llvm/lib/Target/ARM/ARMSubtarget.cpp b/llvm/lib/Target/ARM/ARMSubtarget.cpp --- a/llvm/lib/Target/ARM/ARMSubtarget.cpp +++ b/llvm/lib/Target/ARM/ARMSubtarget.cpp @@ -300,7 +300,7 @@ LdStMultipleTiming = SingleIssuePlusExtras; MaxInterleaveFactor = 4; if (!isThumb()) - PrefLoopAlignment = 3; + PrefLoopLogAlignment = 3; break; case Kryo: break; diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp --- a/llvm/lib/Target/AVR/AVRISelLowering.cpp +++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp @@ -236,7 +236,7 @@ setLibcallName(RTLIB::SIN_F32, "sin"); setLibcallName(RTLIB::COS_F32, "cos"); - setMinFunctionAlignment(1); + setMinFunctionLogAlignment(1); setMinimumJumpTableEntries(UINT_MAX); } diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp --- a/llvm/lib/Target/BPF/BPFISelLowering.cpp +++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp @@ -133,8 +133,8 @@ setBooleanContents(ZeroOrOneBooleanContent); // Function alignments (log2) - setMinFunctionAlignment(3); - setPrefFunctionAlignment(3); + setMinFunctionLogAlignment(3); + setPrefFunctionLogAlignment(3); if (BPFExpandMemcpyInOrder) { // LLVM generic code will try to expand memcpy into load/store pairs at this diff --git a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp --- a/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp +++ b/llvm/lib/Target/Hexagon/HexagonBranchRelaxation.cpp @@ -105,11 +105,11 @@ // offset of the current instruction from the start. unsigned InstOffset = 0; for (auto &B : MF) { - if (B.getAlignment()) { + if (B.getLogAlignment()) { // Although we don't know the exact layout of the final code, we need // to account for alignment padding somehow. This heuristic pads each // aligned basic block according to the alignment value. - int ByteAlign = (1u << B.getAlignment()) - 1; + int ByteAlign = (1u << B.getLogAlignment()) - 1; InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign); } OffsetMap[&B] = InstOffset; diff --git a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp --- a/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFixupHwLoops.cpp @@ -114,11 +114,11 @@ // First pass - compute the offset of each basic block. for (const MachineBasicBlock &MBB : MF) { - if (MBB.getAlignment()) { + if (MBB.getLogAlignment()) { // Although we don't know the exact layout of the final code, we need // to account for alignment padding somehow. This heuristic pads each // aligned basic block according to the alignment value. 
- int ByteAlign = (1u << MBB.getAlignment()) - 1; + int ByteAlign = (1u << MBB.getLogAlignment()) - 1; InstOffset = (InstOffset + ByteAlign) & ~(ByteAlign); } diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -1235,9 +1235,9 @@ Subtarget(ST) { auto &HRI = *Subtarget.getRegisterInfo(); - setPrefLoopAlignment(4); - setPrefFunctionAlignment(4); - setMinFunctionAlignment(2); + setPrefLoopLogAlignment(4); + setPrefFunctionLogAlignment(4); + setMinFunctionLogAlignment(2); setStackPointerRegisterToSaveRestore(HRI.getStackRegister()); setBooleanContents(TargetLoweringBase::UndefinedBooleanContent); setBooleanVectorContents(TargetLoweringBase::UndefinedBooleanContent); diff --git a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp --- a/llvm/lib/Target/Lanai/LanaiISelLowering.cpp +++ b/llvm/lib/Target/Lanai/LanaiISelLowering.cpp @@ -145,8 +145,8 @@ setTargetDAGCombine(ISD::XOR); // Function alignments (log2) - setMinFunctionAlignment(2); - setPrefFunctionAlignment(2); + setMinFunctionLogAlignment(2); + setPrefFunctionLogAlignment(2); setJumpIsExpensive(true); diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp --- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -327,8 +327,8 @@ setLibcallCallingConv(RTLIB::OGT_F64, CallingConv::MSP430_BUILTIN); // TODO: __mspabi_srall, __mspabi_srlll, __mspabi_sllll - setMinFunctionAlignment(1); - setPrefFunctionAlignment(1); + setMinFunctionLogAlignment(1); + setPrefFunctionLogAlignment(1); } SDValue MSP430TargetLowering::LowerOperation(SDValue Op, diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCNaCl.h @@ -14,7 +14,7 @@ namespace llvm { // Log2 of the NaCl MIPS sandbox's instruction bundle size. -static const unsigned MIPS_NACL_BUNDLE_ALIGN = 4u; +static const unsigned MIPS_NACL_BUNDLE_LOG_ALIGN = 4u; bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx, bool *IsStore = nullptr); diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsNaClELFStreamer.cpp @@ -270,7 +270,7 @@ S->getAssembler().setRelaxAll(true); // Set bundle-alignment as required by the NaCl ABI for the target. - S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_ALIGN); + S->EmitBundleAlignMode(MIPS_NACL_BUNDLE_LOG_ALIGN); return S; } diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp --- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp @@ -400,7 +400,7 @@ // NaCl sandboxing requires that indirect call instructions are masked. // This means that function entry points should be bundle-aligned. 
if (Subtarget->isTargetNaCl()) - EmitAlignment(std::max(MF->getAlignment(), MIPS_NACL_BUNDLE_ALIGN)); + EmitAlignment(std::max(MF->getLogAlignment(), MIPS_NACL_BUNDLE_LOG_ALIGN)); if (Subtarget->inMicroMipsMode()) { TS.emitDirectiveSetMicroMips(); @@ -1278,14 +1278,14 @@ const std::vector &MBBs = JT[I].MBBs; for (unsigned J = 0; J < MBBs.size(); ++J) - MBBs[J]->setAlignment(MIPS_NACL_BUNDLE_ALIGN); + MBBs[J]->setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN); } } // If basic block address is taken, block can be target of indirect branch. for (auto &MBB : MF) { if (MBB.hasAddressTaken()) - MBB.setAlignment(MIPS_NACL_BUNDLE_ALIGN); + MBB.setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN); } } diff --git a/llvm/lib/Target/Mips/MipsBranchExpansion.cpp b/llvm/lib/Target/Mips/MipsBranchExpansion.cpp --- a/llvm/lib/Target/Mips/MipsBranchExpansion.cpp +++ b/llvm/lib/Target/Mips/MipsBranchExpansion.cpp @@ -507,7 +507,7 @@ .addImm(0); if (STI->isTargetNaCl()) // Bundle-align the target of indirect branch JR. - TgtMBB->setAlignment(MIPS_NACL_BUNDLE_ALIGN); + TgtMBB->setLogAlignment(MIPS_NACL_BUNDLE_LOG_ALIGN); // In NaCl, modifying the sp is not allowed in branch delay slot. // For MIPS32R6, we can skip using a delay slot branch. diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp --- a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -534,21 +534,22 @@ MF->push_back(BB); // MachineConstantPool measures alignment in bytes. We measure in log2(bytes). - unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment()); + unsigned MaxLogAlign = Log2_32(MCP->getConstantPoolAlignment()); // Mark the basic block as required by the const-pool. // If AlignConstantIslands isn't set, use 4-byte alignment for everything. - BB->setAlignment(AlignConstantIslands ? MaxAlign : 2); + BB->setLogAlignment(AlignConstantIslands ? MaxLogAlign : 2); // The function needs to be as aligned as the basic blocks. The linker may // move functions around based on their alignment. - MF->ensureAlignment(BB->getAlignment()); + MF->ensureLogAlignment(BB->getLogAlignment()); // Order the entries in BB by descending alignment. That ensures correct // alignment of all entries as long as BB is sufficiently aligned. Keep // track of the insertion point for each alignment. We are going to bucket // sort the entries as they are created. - SmallVector InsPoint(MaxAlign + 1, BB->end()); + SmallVector InsPoint(MaxLogAlign + 1, + BB->end()); // Add all of the constants from the constant pool to the end block, use an // identity mapping of CPI's to CPE's. @@ -576,7 +577,7 @@ // Ensure that future entries with higher alignment get inserted before // CPEMI. This is bucket sort with iterators. - for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a) + for (unsigned a = LogAlign + 1; a <= MaxLogAlign; ++a) if (InsPoint[a] == InsAt) InsPoint[a] = CPEMI; // Add a new CPEntry, but no corresponding CPUser yet. 
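
The "bucket sort with iterators" scheme that both the ARM and Mips constant-island passes use to keep entries in descending alignment order can be illustrated with a small standalone sketch (simplified containers and names such as CPEntry are illustrative, not the LLVM data structures):

    #include <list>
    #include <vector>

    // Simplified constant-pool entry: only its log2 alignment matters here.
    struct CPEntry { unsigned LogAlign; };

    // Insert entries so the block stays sorted by descending alignment.
    // InsPoint[a] is where the next entry of log2-alignment 'a' will be
    // inserted, mirroring the InsPoint vector in the passes above.
    static void insertByDescendingAlignment(std::list<CPEntry> &Block,
                                            unsigned MaxLogAlign,
                                            const std::vector<CPEntry> &Entries) {
      std::vector<std::list<CPEntry>::iterator> InsPoint(MaxLogAlign + 1,
                                                         Block.end());
      for (const CPEntry &E : Entries) {
        auto InsAt = InsPoint[E.LogAlign];
        auto NewIt = Block.insert(InsAt, E); // insert just before InsAt
        // Future entries with strictly higher alignment must land before the
        // entry just inserted, so redirect their buckets from InsAt to NewIt.
        for (unsigned A = E.LogAlign + 1; A <= MaxLogAlign; ++A)
          if (InsPoint[A] == InsAt)
            InsPoint[A] = NewIt;
      }
    }
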
@@ -942,14 +943,14 @@ unsigned &Growth) { unsigned CPELogAlign = getCPELogAlign(*U.CPEMI); unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign); - unsigned NextBlockOffset, NextBlockAlignment; + unsigned NextBlockOffset, NextBlockLogAlignment; MachineFunction::const_iterator NextBlock = ++Water->getIterator(); if (NextBlock == MF->end()) { NextBlockOffset = BBInfo[Water->getNumber()].postOffset(); - NextBlockAlignment = 0; + NextBlockLogAlignment = 0; } else { NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset; - NextBlockAlignment = NextBlock->getAlignment(); + NextBlockLogAlignment = NextBlock->getLogAlignment(); } unsigned Size = U.CPEMI->getOperand(2).getImm(); unsigned CPEEnd = CPEOffset + Size; @@ -961,7 +962,7 @@ Growth = CPEEnd - NextBlockOffset; // Compute the padding that would go at the end of the CPE to align the next // block. - Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment); + Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockLogAlignment); // If the CPE is to be inserted before the instruction, that will raise // the offset of the instruction. Also account for unknown alignment padding @@ -1258,7 +1259,7 @@ // Try to split the block so it's fully aligned. Compute the latest split // point where we can add a 4-byte branch instruction, and then align to // LogAlign which is the largest possible alignment in the function. - unsigned LogAlign = MF->getAlignment(); + unsigned LogAlign = MF->getLogAlignment(); assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry"); unsigned BaseInsertOffset = UserOffset + U.getMaxDisp(); LLVM_DEBUG(dbgs() << format("Split in middle of big block before %#x", @@ -1399,7 +1400,7 @@ ++NumCPEs; // Mark the basic block as aligned as required by the const-pool entry. - NewIsland->setAlignment(getCPELogAlign(*U.CPEMI)); + NewIsland->setLogAlignment(getCPELogAlign(*U.CPEMI)); // Increase the size of the island block to account for the new entry. BBInfo[NewIsland->getNumber()].Size += Size; @@ -1431,10 +1432,10 @@ BBInfo[CPEBB->getNumber()].Size = 0; // This block no longer needs to be aligned. - CPEBB->setAlignment(0); + CPEBB->setLogAlignment(0); } else // Entries are sorted by descending alignment, so realign from the front. - CPEBB->setAlignment(getCPELogAlign(*CPEBB->begin())); + CPEBB->setLogAlignment(getCPELogAlign(*CPEBB->begin())); adjustBBOffsetsAfter(CPEBB); // An island has only one predecessor BB and one successor BB. Check if @@ -1529,7 +1530,7 @@ // We should have a way to back out this alignment restriction if we "can" later. // but it is not harmful. // - DestBB->setAlignment(2); + DestBB->setLogAlignment(2); Br.MaxDisp = ((1<<24)-1) * 2; MI->setDesc(TII->get(Mips::JalB16)); } diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -518,7 +518,7 @@ setLibcallName(RTLIB::SRA_I128, nullptr); } - setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2); + setMinFunctionLogAlignment(Subtarget.isGP64bit() ? 3 : 2); // The arguments on the stack are defined in terms of 4-byte slots on O32 // and 8-byte slots on N32/N64. diff --git a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp --- a/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp +++ b/llvm/lib/Target/PowerPC/PPCBranchSelector.cpp @@ -81,14 +81,14 @@ /// original Offset. 
unsigned PPCBSel::GetAlignmentAdjustment(MachineBasicBlock &MBB, unsigned Offset) { - unsigned Align = MBB.getAlignment(); - if (!Align) + unsigned LogAlign = MBB.getLogAlignment(); + if (!LogAlign) return 0; - unsigned AlignAmt = 1 << Align; - unsigned ParentAlign = MBB.getParent()->getAlignment(); + unsigned AlignAmt = 1 << LogAlign; + unsigned ParentLogAlign = MBB.getParent()->getLogAlignment(); - if (Align <= ParentAlign) + if (LogAlign <= ParentLogAlign) return OffsetToAlignment(Offset, AlignAmt); // The alignment of this MBB is larger than the function's alignment, so we @@ -179,21 +179,21 @@ const MachineBasicBlock *Dest, unsigned BrOffset) { int BranchSize; - unsigned MaxAlign = 2; + unsigned MaxLogAlign = 2; bool NeedExtraAdjustment = false; if (Dest->getNumber() <= Src->getNumber()) { // If this is a backwards branch, the delta is the offset from the // start of this block to this branch, plus the sizes of all blocks // from this block to the dest. BranchSize = BrOffset; - MaxAlign = std::max(MaxAlign, Src->getAlignment()); + MaxLogAlign = std::max(MaxLogAlign, Src->getLogAlignment()); int DestBlock = Dest->getNumber(); BranchSize += BlockSizes[DestBlock].first; for (unsigned i = DestBlock+1, e = Src->getNumber(); i < e; ++i) { BranchSize += BlockSizes[i].first; - MaxAlign = std::max(MaxAlign, - Fn.getBlockNumbered(i)->getAlignment()); + MaxLogAlign = + std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment()); } NeedExtraAdjustment = (FirstImpreciseBlock >= 0) && @@ -204,11 +204,11 @@ unsigned StartBlock = Src->getNumber(); BranchSize = BlockSizes[StartBlock].first - BrOffset; - MaxAlign = std::max(MaxAlign, Dest->getAlignment()); + MaxLogAlign = std::max(MaxLogAlign, Dest->getLogAlignment()); for (unsigned i = StartBlock+1, e = Dest->getNumber(); i != e; ++i) { BranchSize += BlockSizes[i].first; - MaxAlign = std::max(MaxAlign, - Fn.getBlockNumbered(i)->getAlignment()); + MaxLogAlign = + std::max(MaxLogAlign, Fn.getBlockNumbered(i)->getLogAlignment()); } NeedExtraAdjustment = (FirstImpreciseBlock >= 0) && @@ -258,7 +258,7 @@ // The computed offset is at most ((1 << alignment) - 4) bytes smaller // than actual offset. So we add this number to the offset for safety. 
if (NeedExtraAdjustment) - BranchSize += (1 << MaxAlign) - 4; + BranchSize += (1 << MaxLogAlign) - 4; return BranchSize; } diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -735,7 +735,7 @@ const SelectionDAG &DAG, unsigned Depth = 0) const override; - unsigned getPrefLoopAlignment(MachineLoop *ML) const override; + unsigned getPrefLoopLogAlignment(MachineLoop *ML) const override; bool shouldInsertFencesForAtomic(const Instruction *I) const override { return true; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1180,9 +1180,9 @@ setJumpIsExpensive(); } - setMinFunctionAlignment(2); + setMinFunctionLogAlignment(2); if (Subtarget.isDarwin()) - setPrefFunctionAlignment(4); + setPrefFunctionLogAlignment(4); switch (Subtarget.getDarwinDirective()) { default: break; @@ -1199,8 +1199,8 @@ case PPC::DIR_PWR7: case PPC::DIR_PWR8: case PPC::DIR_PWR9: - setPrefFunctionAlignment(4); - setPrefLoopAlignment(4); + setPrefFunctionLogAlignment(4); + setPrefLoopLogAlignment(4); break; } @@ -14007,7 +14007,7 @@ } } -unsigned PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { +unsigned PPCTargetLowering::getPrefLoopLogAlignment(MachineLoop *ML) const { switch (Subtarget.getDarwinDirective()) { default: break; case PPC::DIR_970: @@ -14050,7 +14050,7 @@ } } - return TargetLowering::getPrefLoopAlignment(ML); + return TargetLowering::getPrefLoopLogAlignment(ML); } /// getConstraintType - Given a constraint, return the type of diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -199,8 +199,8 @@ // Function alignments (log2). unsigned FunctionAlignment = Subtarget.hasStdExtC() ? 1 : 2; - setMinFunctionAlignment(FunctionAlignment); - setPrefFunctionAlignment(FunctionAlignment); + setMinFunctionLogAlignment(FunctionAlignment); + setPrefFunctionLogAlignment(FunctionAlignment); // Effectively disable jump table generation. setMinimumJumpTableEntries(INT_MAX); diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp --- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp +++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp @@ -1805,7 +1805,7 @@ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); - setMinFunctionAlignment(2); + setMinFunctionLogAlignment(2); computeRegisterProperties(Subtarget->getRegisterInfo()); } diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -120,9 +120,9 @@ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); // Instructions are strings of 2-byte aligned 2-byte values. - setMinFunctionAlignment(2); + setMinFunctionLogAlignment(2); // For performance reasons we prefer 16-byte alignment. - setPrefFunctionAlignment(4); + setPrefFunctionLogAlignment(4); // Handle operations that are handled in a similar way for all types. 
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE; diff --git a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp --- a/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp +++ b/llvm/lib/Target/SystemZ/SystemZLongBranch.cpp @@ -87,7 +87,7 @@ // The minimum alignment of the block, as a log2 value. // This value never changes. - unsigned Alignment = 0; + unsigned LogAlignment = 0; // The number of terminators in this block. This value never changes. unsigned NumTerminators = 0; @@ -127,7 +127,8 @@ // as the runtime address. unsigned KnownBits; - BlockPosition(unsigned InitialAlignment) : KnownBits(InitialAlignment) {} + BlockPosition(unsigned InitialLogAlignment) + : KnownBits(InitialLogAlignment) {} }; class SystemZLongBranch : public MachineFunctionPass { @@ -178,16 +179,16 @@ // instructions. void SystemZLongBranch::skipNonTerminators(BlockPosition &Position, MBBInfo &Block) { - if (Block.Alignment > Position.KnownBits) { + if (Block.LogAlignment > Position.KnownBits) { // When calculating the address of Block, we need to conservatively // assume that Block had the worst possible misalignment. - Position.Address += ((uint64_t(1) << Block.Alignment) - + Position.Address += ((uint64_t(1) << Block.LogAlignment) - (uint64_t(1) << Position.KnownBits)); - Position.KnownBits = Block.Alignment; + Position.KnownBits = Block.LogAlignment; } // Align the addresses. - uint64_t AlignMask = (uint64_t(1) << Block.Alignment) - 1; + uint64_t AlignMask = (uint64_t(1) << Block.LogAlignment) - 1; Position.Address = (Position.Address + AlignMask) & ~AlignMask; // Record the block's position. @@ -275,13 +276,13 @@ Terminators.clear(); Terminators.reserve(NumBlocks); - BlockPosition Position(MF->getAlignment()); + BlockPosition Position(MF->getLogAlignment()); for (unsigned I = 0; I < NumBlocks; ++I) { MachineBasicBlock *MBB = MF->getBlockNumbered(I); MBBInfo &Block = MBBs[I]; // Record the alignment, for quick access. - Block.Alignment = MBB->getAlignment(); + Block.LogAlignment = MBB->getLogAlignment(); // Calculate the size of the fixed part of the block. MachineBasicBlock::iterator MI = MBB->begin(); @@ -339,7 +340,7 @@ // must be long. void SystemZLongBranch::setWorstCaseAddresses() { SmallVector::iterator TI = Terminators.begin(); - BlockPosition Position(MF->getAlignment()); + BlockPosition Position(MF->getLogAlignment()); for (auto &Block : MBBs) { skipNonTerminators(Position, Block); for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) { @@ -440,7 +441,7 @@ // Run a shortening pass and relax any branches that need to be relaxed. void SystemZLongBranch::relaxBranches() { SmallVector::iterator TI = Terminators.begin(); - BlockPosition Position(MF->getAlignment()); + BlockPosition Position(MF->getLogAlignment()); for (auto &Block : MBBs) { skipNonTerminators(Position, Block); for (unsigned BTI = 0, BTE = Block.NumTerminators; BTI != BTE; ++BTI) { diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -1892,13 +1892,13 @@ MaxLoadsPerMemcmpOptSize = 2; // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4). - setPrefLoopAlignment(ExperimentalPrefLoopAlignment); + setPrefLoopLogAlignment(ExperimentalPrefLoopAlignment); // An out-of-order CPU can speculatively execute past a predictable branch, // but a conditional move could be stalled by an expensive earlier operation. 
PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder(); EnableExtLdPromotion = true; - setPrefFunctionAlignment(4); // 2^4 bytes. + setPrefFunctionLogAlignment(4); // 2^4 bytes. verifyIntrinsicTables(); } diff --git a/llvm/lib/Target/X86/X86RetpolineThunks.cpp b/llvm/lib/Target/X86/X86RetpolineThunks.cpp --- a/llvm/lib/Target/X86/X86RetpolineThunks.cpp +++ b/llvm/lib/Target/X86/X86RetpolineThunks.cpp @@ -279,7 +279,7 @@ CallTarget->addLiveIn(Reg); CallTarget->setHasAddressTaken(); - CallTarget->setAlignment(4); + CallTarget->setLogAlignment(4); insertRegReturnAddrClobber(*CallTarget, Reg); CallTarget->back().setPreInstrSymbol(MF, TargetSym); BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc)); diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp --- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp +++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp @@ -171,8 +171,8 @@ setTargetDAGCombine(ISD::INTRINSIC_VOID); setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); - setMinFunctionAlignment(1); - setPrefFunctionAlignment(2); + setMinFunctionLogAlignment(1); + setPrefFunctionLogAlignment(2); } bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { diff --git a/llvm/test/CodeGen/ARM/constant-island-movwt.mir b/llvm/test/CodeGen/ARM/constant-island-movwt.mir --- a/llvm/test/CodeGen/ARM/constant-island-movwt.mir +++ b/llvm/test/CodeGen/ARM/constant-island-movwt.mir @@ -892,12 +892,12 @@ # CHECK-NEXT: renamable $q12 = VDUP32q killed renamable $r5, 14, $noreg # CHECK-NEXT: t2B %bb.2, 14, $noreg # CHECK-NEXT: {{^ $}} -# CHECK-NEXT: bb.1 (align 2): +# CHECK-NEXT: bb.1 (align 4): # CHECK-NEXT: successors:{{ }} # CHECK-NEXT: {{^ $}} # CHECK-NEXT: CONSTPOOL_ENTRY 1, %const.0, 4 # CHECK-NEXT: {{^ $}} -# CHECK-NEXT: bb.2.entry (align 1): +# CHECK-NEXT: bb.2.entry (align 2): # CHECK-NEXT: liveins: $d13, $s27, $r10, $r9, $r8, $s26, $d12, $s25, $s24, # CHECK-SAME: $d15, $s30, $s31, $d14, $s28, $s29, $lr, $r0, $d21, # CHECK-SAME: $r3, $q10, $d20, $d17, $r2, $d25, $q11, $d22, $d23, diff --git a/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir --- a/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir +++ b/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir @@ -55,16 +55,16 @@ alignment: 2 #CHECK: B %[[BB4:bb.[0-9]]] -#CHECK: bb.{{.}} (align 2): +#CHECK: bb.{{.}} (align 4): #CHECK: successors: #CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 4 -#CHECK: bb.{{.}} (align 2): +#CHECK: bb.{{.}} (align 4): #CHECK: successors: #CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 4 -#CHECK: bb.{{.}} (align 1): +#CHECK: bb.{{.}} (align 2): #CHECK: successors: #CHECK: CONSTPOOL_ENTRY {{.}}, %const.{{.}}, 2 -#CHECK: [[BB4]].entry (align 2): +#CHECK: [[BB4]].entry (align 4): body: | bb.0.entry: diff --git a/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir b/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir --- a/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir +++ b/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir @@ -53,13 +53,13 @@ alignment: 2 #CHECK: t2B %[[BB3:bb.[0-9]]] -#CHECK: bb.{{.}} (align 2): +#CHECK: bb.{{.}} (align 4): #CHECK: successors: #CHECK: CONSTPOOL_ENTRY 2, %const.{{.}}, 4 -#CHECK: bb.{{.}} (align 1): +#CHECK: bb.{{.}} (align 2): #CHECK: successors: #CHECK: CONSTPOOL_ENTRY 3, %const.{{.}}, 2 -#CHECK: [[BB3]].entry (align 1): +#CHECK: [[BB3]].entry (align 2): body: | bb.0.entry: diff --git a/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir --- a/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir +++ 
b/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir @@ -76,11 +76,11 @@ isTargetSpecific: false -#CHECK: bb.{{.*}} (align 1): +#CHECK: bb.{{.*}} (align 2): #CHECK: successors: #CHECK: CONSTPOOL_ENTRY 1, %const{{.*}}, 2 # We want this block to be 4 byte aligned: -#CHECK: bb.{{.*}}.LA (align 2): +#CHECK: bb.{{.*}}.LA (align 4): body: | bb.0.entry: diff --git a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir --- a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir +++ b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir @@ -77,7 +77,7 @@ isTargetSpecific: false -#CHECK: bb.{{.*}} (align 1): +#CHECK: bb.{{.*}} (align 2): #CHECK: successors: #CHECK: CONSTPOOL_ENTRY 1, %const{{.*}}, 2 # diff --git a/llvm/test/CodeGen/Mips/unaligned-memops-mapping.mir b/llvm/test/CodeGen/Mips/unaligned-memops-mapping.mir --- a/llvm/test/CodeGen/Mips/unaligned-memops-mapping.mir +++ b/llvm/test/CodeGen/Mips/unaligned-memops-mapping.mir @@ -118,7 +118,7 @@ # CHECK: c: 60 25 90 03 swr $1, 3($5) # CHECK-LABEL: g2: -# CHECK: 14: 60 24 64 00 lwle $1, 0($4) -# CHECK: 18: 60 24 66 03 lwre $1, 3($4) -# CHECK: 1c: 60 25 a0 00 swle $1, 0($5) -# CHECK: 20: 60 25 a2 03 swre $1, 3($5) +# CHECK: 12: 60 24 64 00 lwle $1, 0($4) +# CHECK: 16: 60 24 66 03 lwre $1, 3($4) +# CHECK: 1a: 60 25 a0 00 swle $1, 0($5) +# CHECK: 1e: 60 25 a2 03 swre $1, 3($5) diff --git a/llvm/test/CodeGen/PowerPC/block-placement.mir b/llvm/test/CodeGen/PowerPC/block-placement.mir --- a/llvm/test/CodeGen/PowerPC/block-placement.mir +++ b/llvm/test/CodeGen/PowerPC/block-placement.mir @@ -212,7 +212,7 @@ ; CHECK: successors: %bb.11(0x80000000) ; CHECK: B %bb.11 - ; CHECK: bb.8.while.body.i (align 4): + ; CHECK: bb.8.while.body.i (align 16): ; CHECK: successors: %bb.11(0x04000000), %bb.9(0x7c000000) ; CHECK: BCC 76, killed renamable $cr0, %bb.11 diff --git a/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir b/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir --- a/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir +++ b/llvm/test/CodeGen/X86/tail-merge-after-mbp.mir @@ -26,7 +26,7 @@ ; CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8) ; CHECK: TEST64rr $rax, $rax, implicit-def $eflags ; CHECK: JCC_1 %bb.1, 4, implicit $eflags - ; CHECK: bb.5 (align 4): + ; CHECK: bb.5 (align 16): ; CHECK: successors: %bb.6(0x71555555), %bb.8(0x0eaaaaab) ; CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8), (load 8) ; CHECK: JCC_1 %bb.8, 5, implicit $eflags diff --git a/llvm/test/DebugInfo/X86/debug-loc-offset.mir b/llvm/test/DebugInfo/X86/debug-loc-offset.mir --- a/llvm/test/DebugInfo/X86/debug-loc-offset.mir +++ b/llvm/test/DebugInfo/X86/debug-loc-offset.mir @@ -32,7 +32,7 @@ # Checking that we have two compile units with two sets of high/lo_pc. # CHECK: .debug_info contents # CHECK: DW_TAG_compile_unit -# CHECK: DW_AT_low_pc {{.*}} (0x0000000000000020 ".text") +# CHECK: DW_AT_low_pc {{.*}} (0x0000000000000018 ".text") # CHECK: DW_AT_high_pc # # CHECK: DW_TAG_subprogram @@ -42,8 +42,8 @@ # CHECK: DW_TAG_formal_parameter # CHECK-NOT: DW_TAG # CHECK: DW_AT_location [DW_FORM_sec_offset] ({{.*}} -# CHECK-NEXT: [0x00000029, 0x00000037): DW_OP_breg0 EAX+0, DW_OP_deref -# CHECK-NEXT: [0x00000037, 0x00000063): DW_OP_breg5 EBP-8, DW_OP_deref, DW_OP_deref +# CHECK-NEXT: [0x00000021, 0x0000002f): DW_OP_breg0 EAX+0, DW_OP_deref +# CHECK-NEXT: [0x0000002f, 0x0000005b): DW_OP_breg5 EBP-8, DW_OP_deref, DW_OP_deref # CHECK-NEXT: DW_AT_name [DW_FORM_strp]{{.*}}"a" # # CHECK: DW_TAG_variable
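
As a closing reference for the offset bookkeeping touched above (BranchRelaxation, PPCBranchSelector, the constant-island passes) and for the shifted addresses in the test expectations, the following standalone sketch (not LLVM code; paddingTo is a hypothetical helper) shows the padding a log2 alignment implies at a given offset, the same quantity the patch computes with OffsetToAlignment(Offset, 1u << LogAlign):

    #include <cstdint>

    // Bytes of padding (nops) needed to advance Offset to the next multiple
    // of (1 << LogAlign).
    static uint64_t paddingTo(uint64_t Offset, unsigned LogAlign) {
      uint64_t AlignBytes = uint64_t(1) << LogAlign;
      return (AlignBytes - (Offset % AlignBytes)) % AlignBytes;
    }

    // Example: a block with LogAlignment 4 (16 bytes) that would start at
    // offset 0x46 gets paddingTo(0x46, 4) == 0xA bytes of padding and is
    // placed at 0x50.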