diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2116,13 +2116,10 @@
     PrefFunctionAlignment = llvm::Align(1ULL << LogAlign);
   }
 
-  /// Set the target's preferred loop alignment. Default alignment is zero, it
-  /// means the target does not care about loop alignment. The alignment is
-  /// specified in log2(bytes). The target may also override
-  /// getPrefLoopAlignment to provide per-loop values.
-  void setPrefLoopLogAlignment(unsigned LogAlign) {
-    PrefLoopAlignment = llvm::Align(1ULL << LogAlign);
-  }
+  /// Set the target's preferred loop alignment. The default alignment of one
+  /// means the target does not care about loop alignment. The target may also
+  /// override getPrefLoopAlignment to provide per-loop values.
+  void setPrefLoopAlignment(llvm::Align Align) { PrefLoopAlignment = Align; }
 
   /// Set the minimum stack alignment of an argument.
   void setMinStackArgumentAlignment(unsigned Align) {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -643,7 +643,7 @@
   setMinFunctionAlignment(llvm::Align(4));
   // Set preferred alignments.
   setPrefFunctionLogAlignment(STI.getPrefFunctionLogAlignment());
-  setPrefLoopLogAlignment(STI.getPrefLoopLogAlignment());
+  setPrefLoopAlignment(llvm::Align(1ULL << STI.getPrefLoopLogAlignment()));
 
   // Only change the limit for entries in a jump table if specified by
   // the sub target, but not at the command line.
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1419,7 +1419,8 @@
   // Prefer likely predicted branches to selects on out-of-order cores.
   PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
 
-  setPrefLoopLogAlignment(Subtarget->getPrefLoopLogAlignment());
+  setPrefLoopAlignment(
+      llvm::Align(1UL << Subtarget->getPrefLoopLogAlignment()));
 
   setMinFunctionAlignment(Subtarget->isThumb() ? llvm::Align(2)
                                                : llvm::Align(4));
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -1235,7 +1235,7 @@
     Subtarget(ST) {
   auto &HRI = *Subtarget.getRegisterInfo();
 
-  setPrefLoopLogAlignment(4);
+  setPrefLoopAlignment(llvm::Align(16));
   setPrefFunctionLogAlignment(4);
   setMinFunctionAlignment(llvm::Align(4));
   setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1200,7 +1200,7 @@
   case PPC::DIR_PWR8:
   case PPC::DIR_PWR9:
     setPrefFunctionLogAlignment(4);
-    setPrefLoopLogAlignment(4);
+    setPrefLoopAlignment(llvm::Align(16));
     break;
   }
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -73,9 +73,10 @@
 
 static cl::opt<int> ExperimentalPrefLoopAlignment(
     "x86-experimental-pref-loop-alignment", cl::init(4),
-    cl::desc("Sets the preferable loop alignment for experiments "
-             "(the last x86-experimental-pref-loop-alignment bits"
-             " of the loop header PC will be 0)."),
+    cl::desc(
+        "Sets the preferable loop alignment for experiments (as log2 bytes) "
+        "(the last x86-experimental-pref-loop-alignment bits"
+        " of the loop header PC will be 0)."),
     cl::Hidden);
 
 // Added in 10.0.
@@ -1892,7 +1893,7 @@
   MaxLoadsPerMemcmpOptSize = 2;
 
   // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
-  setPrefLoopLogAlignment(ExperimentalPrefLoopAlignment);
+  setPrefLoopAlignment(llvm::Align(1UL << ExperimentalPrefLoopAlignment));
 
   // An out-of-order CPU can speculatively execute past a predictable branch,
   // but a conditional move could be stalled by an expensive earlier operation.
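
A minimal usage sketch of the API change (not part of the patch; STI stands in for any subtarget, as in the AArch64 hunk above). Call sites that still store a log2 value construct the llvm::Align explicitly:

  // Old API: the alignment is passed as log2(bytes).
  setPrefLoopLogAlignment(STI.getPrefLoopLogAlignment());

  // New API: the alignment is passed as an llvm::Align value; the 1ULL
  // literal keeps the shift in 64 bits independent of the host.
  setPrefLoopAlignment(llvm::Align(1ULL << STI.getPrefLoopLogAlignment()));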