diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3277,12 +3277,12 @@
     // The type Ty passed here would then be "void". Skip the alignment
     // checks, but do not return false right away, since that confuses
     // LSR into crashing.
-    unsigned A = DL.getABITypeAlignment(Ty);
+    Align A = DL.getABITypeAlign(Ty);
     // The base offset must be a multiple of the alignment.
-    if ((AM.BaseOffs % A) != 0)
+    if (!isAligned(A, AM.BaseOffs))
       return false;
     // The shifted offset must fit in 11 bits.
-    if (!isInt<11>(AM.BaseOffs >> Log2_32(A)))
+    if (!isInt<11>(AM.BaseOffs >> Log2(A)))
       return false;
   }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -14772,17 +14772,18 @@
     EVT MemVT = LD->getMemoryVT();
     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
-    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
+    Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
-    unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
+    Align ScalarABIAlignment = DAG.getDataLayout().getABITypeAlign(STy);
     if (LD->isUnindexed() && VT.isVector() &&
         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
           // P8 and later hardware should just use LOAD.
-          !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
-                                       VT == MVT::v4i32 || VT == MVT::v4f32)) ||
+          !Subtarget.hasP8Vector() &&
+          (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
+           VT == MVT::v4f32)) ||
          (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
-          LD->getAlignment() >= ScalarABIAlignment)) &&
-        LD->getAlignment() < ABIAlignment) {
+          LD->getAlign() >= ScalarABIAlignment)) &&
+        LD->getAlign() < ABIAlignment) {
       // This is a type-legal unaligned Altivec or QPX load.
       SDValue Chain = LD->getChain();
       SDValue Ptr = LD->getBasePtr();
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -1127,10 +1127,8 @@
   if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
     return false;

-  unsigned Alignment = S->getAlignment();
-  unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType());
-  if (Alignment == 0) // Ensure that codegen never sees alignment 0
-    Alignment = ABIAlignment;
+  Align Alignment = S->getAlign();
+  Align ABIAlignment = DL.getABITypeAlign(Val->getType());
   bool Aligned = Alignment >= ABIAlignment;

   X86AddressMode AM;
@@ -1321,14 +1319,9 @@
   if (!X86SelectAddress(Ptr, AM))
     return false;

-  unsigned Alignment = LI->getAlignment();
-  unsigned ABIAlignment = DL.getABITypeAlignment(LI->getType());
-  if (Alignment == 0) // Ensure that codegen never sees alignment 0
-    Alignment = ABIAlignment;
-
   unsigned ResultReg = 0;
   if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
-                       Alignment))
+                       LI->getAlign().value()))
     return false;

   updateValueMap(I, ResultReg);
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -509,9 +509,8 @@
   std::map<unsigned, GlobalVariable *> NewGlobals;

   // Get the alignment of the global, either explicit or target-specific.
-  unsigned StartAlignment = GV->getAlignment();
-  if (StartAlignment == 0)
-    StartAlignment = DL.getABITypeAlignment(GV->getType());
+  Align StartAlignment =
+      DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getType());

   // Loop over all users and create replacement variables for used aggregate
   // elements.
@@ -554,7 +553,7 @@
       // had 256 byte alignment for example, something might depend on that:
       // propagate info to each field.
       uint64_t FieldOffset = Layout.getElementOffset(ElementIdx);
-      Align NewAlign(MinAlign(StartAlignment, FieldOffset));
+      Align NewAlign = commonAlignment(StartAlignment, FieldOffset);
       if (NewAlign > DL.getABITypeAlign(STy->getElementType(ElementIdx)))
         NGV->setAlignment(NewAlign);
@@ -570,7 +569,7 @@
      // Calculate the known alignment of the field. If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
-      Align NewAlign(MinAlign(StartAlignment, EltSize * ElementIdx));
+      Align NewAlign = commonAlignment(StartAlignment, EltSize * ElementIdx);
       if (NewAlign > EltAlign)
         NGV->setAlignment(NewAlign);
       transferSRADebugInfo(GV, NGV, FragmentSizeInBits * ElementIdx,
diff --git a/llvm/lib/Transforms/IPO/Inliner.cpp b/llvm/lib/Transforms/IPO/Inliner.cpp
--- a/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -191,8 +191,8 @@
     // function. Also, AllocasForType can be empty of course!
     bool MergedAwayAlloca = false;
     for (AllocaInst *AvailableAlloca : AllocasForType) {
-      unsigned Align1 = AI->getAlignment(),
-               Align2 = AvailableAlloca->getAlignment();
+      Align Align1 = AI->getAlign();
+      Align Align2 = AvailableAlloca->getAlign();

      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
@@ -219,18 +219,8 @@
      AI->replaceAllUsesWith(AvailableAlloca);

-      if (Align1 != Align2) {
-        if (!Align1 || !Align2) {
-          const DataLayout &DL = Caller->getParent()->getDataLayout();
-          unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType());
-
-          Align1 = Align1 ? Align1 : TypeAlign;
-          Align2 = Align2 ? Align2 : TypeAlign;
-        }
-
-        if (Align1 > Align2)
-          AvailableAlloca->setAlignment(AI->getAlign());
-      }
+      if (Align1 > Align2)
+        AvailableAlloca->setAlignment(AI->getAlign());

      AI->eraseFromParent();
      MergedAwayAlloca = true;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -93,8 +93,8 @@
   Type *CastElTy = PTy->getElementType();
   if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

-  unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy);
-  unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy);
+  Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
+  Align CastElTyAlign = DL.getABITypeAlign(CastElTy);
   if (CastElTyAlign < AllocElTyAlign) return nullptr;

   // If the allocation has multiple uses, only promote it if we are strictly
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1369,15 +1369,9 @@
     return;
   }

-  uint64_t Align;
-  if (ClPreserveAlignment) {
-    Align = LI.getAlignment();
-    if (Align == 0)
-      Align = DL.getABITypeAlignment(LI.getType());
-  } else {
-    Align = 1;
-  }
-  Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI);
+  Align Alignment = ClPreserveAlignment ? LI.getAlign() : Align(1);
+  Value *Shadow =
+      DFSF.loadShadow(LI.getPointerOperand(), Size, Alignment.value(), &LI);
   if (ClCombinePointerLabelsOnLoad) {
     Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
     Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1109,7 +1109,7 @@
   void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                    unsigned Size, Align Alignment) {
     const DataLayout &DL = F.getParent()->getDataLayout();
-    const Align IntptrAlignment = Align(DL.getABITypeAlignment(MS.IntptrTy));
+    const Align IntptrAlignment = DL.getABITypeAlign(MS.IntptrTy);
     unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
     assert(IntptrAlignment >= kMinOriginAlignment);
     assert(IntptrSize >= kOriginSize);
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -1894,7 +1894,7 @@
   // We start with an alignment of one and try to find instructions that allow
   // us to prove better alignment.
-  unsigned Alignment = 1;
+  Align Alignment;
   // Keep track of which types of access we see
   bool SawUnorderedAtomic = false;
   bool SawNotAtomic = false;
@@ -1942,10 +1942,7 @@
        SawUnorderedAtomic |= Load->isAtomic();
        SawNotAtomic |= !Load->isAtomic();

-        unsigned InstAlignment = Load->getAlignment();
-        if (!InstAlignment)
-          InstAlignment =
-              MDL.getABITypeAlignment(Load->getType());
+        Align InstAlignment = Load->getAlign();

        // Note that proving a load safe to speculate requires proving
        // sufficient alignment at the target location. Proving it guaranteed
@@ -1973,10 +1970,7 @@
        // already know that promotion is safe, since it may have higher
        // alignment than any other guaranteed stores, in which case we can
        // raise the alignment on the promoted store.
-        unsigned InstAlignment = Store->getAlignment();
-        if (!InstAlignment)
-          InstAlignment =
-              MDL.getABITypeAlignment(Store->getValueOperand()->getType());
+        Align InstAlignment = Store->getAlign();

        if (!DereferenceableInPH || !SafeToInsertStore ||
            (InstAlignment > Alignment)) {
@@ -2079,7 +2073,8 @@
   SSAUpdater SSA(&NewPHIs);
   LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                         InsertPts, MSSAInsertPts, PIC, *CurAST, MSSAU, *LI, DL,
-                        Alignment, SawUnorderedAtomic, AATags, *SafetyInfo);
+                        Alignment.value(), SawUnorderedAtomic, AATags,
+                        *SafetyInfo);

   // Set up the preheader to have a definition of the value. It is the live-out
   // value from the preheader that uses in the loop will use.
@@ -2088,7 +2083,7 @@
       SomePtr->getName() + ".promoted", Preheader->getTerminator());
   if (SawUnorderedAtomic)
     PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
-  PreheaderLoad->setAlignment(Align(Alignment));
+  PreheaderLoad->setAlignment(Alignment);
   PreheaderLoad->setDebugLoc(DebugLoc());
   if (AATags)
     PreheaderLoad->setAAMetadata(AATags);
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -4267,7 +4267,7 @@
    const Align Alignment = commonAlignment(AI.getAlign(), P.beginOffset());
    // If we will get at least this much alignment from the type alone, leave
    // the alloca's alignment unconstrained.
-    const bool IsUnconstrained = Alignment <= DL.getABITypeAlignment(SliceTy);
+    const bool IsUnconstrained = Alignment <= DL.getABITypeAlign(SliceTy);
    NewAI = new AllocaInst(
        SliceTy, AI.getType()->getAddressSpace(), nullptr,
        IsUnconstrained ? DL.getPrefTypeAlign(SliceTy) : Alignment,
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -3147,29 +3147,11 @@
   PStore->getAAMetadata(AAMD, /*Merge=*/false);
   PStore->getAAMetadata(AAMD, /*Merge=*/true);
   SI->setAAMetadata(AAMD);
-  unsigned PAlignment = PStore->getAlignment();
-  unsigned QAlignment = QStore->getAlignment();
-  unsigned TypeAlignment =
-      DL.getABITypeAlignment(SI->getValueOperand()->getType());
-  unsigned MinAlignment;
-  unsigned MaxAlignment;
-  std::tie(MinAlignment, MaxAlignment) = std::minmax(PAlignment, QAlignment);
   // Choose the minimum alignment. If we could prove both stores execute, we
   // could use biggest one. In this case, though, we only know that one of the
   // stores executes. And we don't know it's safe to take the alignment from a
   // store that doesn't execute.
-  if (MinAlignment != 0) {
-    // Choose the minimum of all non-zero alignments.
-    SI->setAlignment(Align(MinAlignment));
-  } else if (MaxAlignment != 0) {
-    // Choose the minimal alignment between the non-zero alignment and the ABI
-    // default alignment for the type of the stored value.
-    SI->setAlignment(Align(std::min(MaxAlignment, TypeAlignment)));
-  } else {
-    // If both alignments are zero, use ABI default alignment for the type of
-    // the stored value.
-    SI->setAlignment(Align(TypeAlignment));
-  }
+  SI->setAlignment(std::min(PStore->getAlign(), QStore->getAlign()));
   QStore->eraseFromParent();
   PStore->eraseFromParent();
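
For readers less familiar with the llvm::Align API these hunks migrate to, the following standalone sketch (not part of the patch) exercises the helpers used above. It only assumes LLVM's Support headers and library are available to compile against; the idioms it demonstrates are the ones in the hunks: a default-constructed Align is one byte, isAligned replaces manual modulo checks, Log2 replaces Log2_32 on a raw value, commonAlignment replaces MinAlign for offsets from an aligned base, and Align is totally ordered so std::min works directly.

// Standalone illustration of the llvm::Align helpers used in the patch above.
// Not part of the patch; assumes LLVM's Support library is available.
#include "llvm/Support/Alignment.h"

#include <algorithm>
#include <cassert>

using llvm::Align;

int main() {
  // A default-constructed Align is 1 byte, which is why LICM can replace
  // `unsigned Alignment = 1;` with `Align Alignment;`.
  Align One;
  Align Sixteen(16);

  // isAligned(A, Offset) replaces the old `(Offset % A) != 0` test
  // (HexagonISelLowering hunk).
  assert(llvm::isAligned(Sixteen, 32));
  assert(!llvm::isAligned(Sixteen, 20));

  // Log2(A) replaces Log2_32 on the raw alignment value.
  assert(llvm::Log2(Sixteen) == 4);

  // commonAlignment(A, Offset) replaces MinAlign: the largest alignment
  // still guaranteed at `base + Offset` (GlobalOpt and SROA hunks).
  assert(llvm::commonAlignment(Sixteen, 24) == Align(8));

  // Align is totally ordered, so std::min picks the weaker of two
  // guarantees (SimplifyCFG hunk).
  assert(std::min(One, Sixteen) == One);
  return 0;
}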