diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -4177,8 +4177,8 @@ bool NeedCopy = false; if (Addr.getAlignment() < Align && - llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) < - Align.getQuantity()) { + llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) < + Align.getAsAlign()) { NeedCopy = true; } else if (I->hasLValue()) { auto LV = I->getKnownLValue(); diff --git a/llvm/include/llvm/Transforms/Utils/Local.h b/llvm/include/llvm/Transforms/Utils/Local.h --- a/llvm/include/llvm/Transforms/Utils/Local.h +++ b/llvm/include/llvm/Transforms/Utils/Local.h @@ -267,18 +267,18 @@ /// so if alignment is important, a more reliable approach is to simply align /// all global variables and allocation instructions to their preferred /// alignment from the beginning. -unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, - const DataLayout &DL, - const Instruction *CxtI = nullptr, - AssumptionCache *AC = nullptr, - const DominatorTree *DT = nullptr); +Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, + const DataLayout &DL, + const Instruction *CxtI = nullptr, + AssumptionCache *AC = nullptr, + const DominatorTree *DT = nullptr); /// Try to infer an alignment for the specified pointer. 
-inline unsigned getKnownAlignment(Value *V, const DataLayout &DL, - const Instruction *CxtI = nullptr, - AssumptionCache *AC = nullptr, - const DominatorTree *DT = nullptr) { - return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT); +inline Align getKnownAlignment(Value *V, const DataLayout &DL, + const Instruction *CxtI = nullptr, + AssumptionCache *AC = nullptr, + const DominatorTree *DT = nullptr) { + return getOrEnforceKnownAlignment(V, MaybeAlign(), DL, CxtI, AC, DT); } /// Create a call that matches the invoke \p II in terms of arguments, diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -1939,12 +1939,14 @@ // If this is a memcpy (or similar) then we may be able to improve the // alignment if (MemIntrinsic *MI = dyn_cast(CI)) { - unsigned DestAlign = getKnownAlignment(MI->getDest(), *DL); - if (DestAlign > MI->getDestAlignment()) + Align DestAlign = getKnownAlignment(MI->getDest(), *DL); + MaybeAlign MIDestAlign = MI->getDestAlign(); + if (!MIDestAlign || DestAlign > *MIDestAlign) MI->setDestAlignment(DestAlign); if (MemTransferInst *MTI = dyn_cast(MI)) { - unsigned SrcAlign = getKnownAlignment(MTI->getSource(), *DL); - if (SrcAlign > MTI->getSourceAlignment()) + MaybeAlign MTISrcAlign = MTI->getSourceAlign(); + Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL); + if (!MTISrcAlign || SrcAlign > *MTISrcAlign) MTI->setSourceAlignment(SrcAlign); } } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -117,16 +117,16 @@ } Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) { - unsigned DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT); - unsigned CopyDstAlign = MI->getDestAlignment(); - if (CopyDstAlign < 
DstAlign){ + Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT); + MaybeAlign CopyDstAlign = MI->getDestAlign(); + if (!CopyDstAlign || *CopyDstAlign < DstAlign) { MI->setDestAlignment(DstAlign); return MI; } - unsigned SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT); - unsigned CopySrcAlign = MI->getSourceAlignment(); - if (CopySrcAlign < SrcAlign) { + Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT); + MaybeAlign CopySrcAlign = MI->getSourceAlign(); + if (!CopySrcAlign || *CopySrcAlign < SrcAlign) { MI->setSourceAlignment(SrcAlign); return MI; } @@ -234,9 +234,10 @@ } Instruction *InstCombiner::SimplifyAnyMemSet(AnyMemSetInst *MI) { - const unsigned KnownAlignment = + const Align KnownAlignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT); - if (MI->getDestAlignment() < KnownAlignment) { + MaybeAlign MemSetAlign = MI->getDestAlign(); + if (!MemSetAlign || *MemSetAlign < KnownAlignment) { MI->setDestAlignment(KnownAlignment); return MI; } @@ -2456,7 +2457,7 @@ case Intrinsic::ppc_altivec_lvx: case Intrinsic::ppc_altivec_lvxl: // Turn PPC lvx -> load if the pointer is known aligned. - if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC, + if (getOrEnforceKnownAlignment(II->getArgOperand(0), Align(16), DL, II, &AC, &DT) >= 16) { Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0), PointerType::getUnqual(II->getType())); @@ -2473,7 +2474,7 @@ case Intrinsic::ppc_altivec_stvx: case Intrinsic::ppc_altivec_stvxl: // Turn stvx -> store if the pointer is known aligned. - if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC, + if (getOrEnforceKnownAlignment(II->getArgOperand(1), Align(16), DL, II, &AC, &DT) >= 16) { Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType()); @@ -2490,7 +2491,7 @@ } case Intrinsic::ppc_qpx_qvlfs: // Turn PPC QPX qvlfs -> load if the pointer is known aligned. 
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC, + if (getOrEnforceKnownAlignment(II->getArgOperand(0), Align(16), DL, II, &AC, &DT) >= 16) { Type *VTy = VectorType::get(Builder.getFloatTy(), @@ -2503,7 +2504,7 @@ break; case Intrinsic::ppc_qpx_qvlfd: // Turn PPC QPX qvlfd -> load if the pointer is known aligned. - if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC, + if (getOrEnforceKnownAlignment(II->getArgOperand(0), Align(32), DL, II, &AC, &DT) >= 32) { Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0), PointerType::getUnqual(II->getType())); @@ -2512,7 +2513,7 @@ break; case Intrinsic::ppc_qpx_qvstfs: // Turn PPC QPX qvstfs -> store if the pointer is known aligned. - if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC, + if (getOrEnforceKnownAlignment(II->getArgOperand(1), Align(16), DL, II, &AC, &DT) >= 16) { Type *VTy = VectorType::get( Builder.getFloatTy(), @@ -2525,7 +2526,7 @@ break; case Intrinsic::ppc_qpx_qvstfd: // Turn PPC QPX qvstfd -> store if the pointer is known aligned. 
- if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC, + if (getOrEnforceKnownAlignment(II->getArgOperand(1), Align(32), DL, II, &AC, &DT) >= 32) { Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType()); @@ -3349,9 +3350,8 @@ break; case Intrinsic::arm_neon_vld1: { - unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), - DL, II, &AC, &DT); - if (Value *V = simplifyNeonVld1(*II, MemAlign, Builder)) + Align MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT); + if (Value *V = simplifyNeonVld1(*II, MemAlign.value(), Builder)) return replaceInstUsesWith(*II, V); break; } @@ -3369,14 +3369,13 @@ case Intrinsic::arm_neon_vst2lane: case Intrinsic::arm_neon_vst3lane: case Intrinsic::arm_neon_vst4lane: { - unsigned MemAlign = - getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT); + Align MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT); unsigned AlignArg = II->getNumArgOperands() - 1; ConstantInt *IntrAlign = dyn_cast(II->getArgOperand(AlignArg)); - if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) + if (IntrAlign && IntrAlign->getZExtValue() < MemAlign.value()) return replaceOperand(*II, AlignArg, ConstantInt::get(Type::getInt32Ty(II->getContext()), - MemAlign, false)); + MemAlign.value(), false)); break; } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -398,9 +398,10 @@ // is only subsequently read. 
SmallVector ToDelete; if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) { - unsigned SourceAlign = getOrEnforceKnownAlignment( - Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT); - if (AI.getAlignment() <= SourceAlign && + MaybeAlign AllocaAlign = AI.getAlign(); + Align SourceAlign = getOrEnforceKnownAlignment( + Copy->getSource(), AllocaAlign, DL, &AI, &AC, &DT); + if ((!AllocaAlign || *AllocaAlign <= SourceAlign) && isDereferenceableForAllocaSize(Copy->getSource(), &AI, DL)) { LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n'); LLVM_DEBUG(dbgs() << " memcpy = " << *Copy << '\n'); @@ -956,16 +957,16 @@ return Res; // Attempt to improve the alignment. - unsigned KnownAlign = getOrEnforceKnownAlignment( - Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT); - unsigned LoadAlign = LI.getAlignment(); - unsigned EffectiveLoadAlign = - LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType()); + Align KnownAlign = getOrEnforceKnownAlignment( + Op, DL.getPrefTypeAlign(LI.getType()), DL, &LI, &AC, &DT); + MaybeAlign LoadAlign = LI.getAlign(); + Align EffectiveLoadAlign = + LoadAlign ? *LoadAlign : DL.getABITypeAlign(LI.getType()); if (KnownAlign > EffectiveLoadAlign) - LI.setAlignment(MaybeAlign(KnownAlign)); + LI.setAlignment(KnownAlign); else if (LoadAlign == 0) - LI.setAlignment(MaybeAlign(EffectiveLoadAlign)); + LI.setAlignment(EffectiveLoadAlign); // Replace GEP indices if possible. if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) { @@ -1361,11 +1362,11 @@ return eraseInstFromFunction(SI); // Attempt to improve the alignment. 
- const Align KnownAlign = Align(getOrEnforceKnownAlignment( - Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT)); - const MaybeAlign StoreAlign = MaybeAlign(SI.getAlignment()); + const Align KnownAlign = getOrEnforceKnownAlignment( + Ptr, DL.getPrefTypeAlign(Val->getType()), DL, &SI, &AC, &DT); + const MaybeAlign StoreAlign = SI.getAlign(); const Align EffectiveStoreAlign = - StoreAlign ? *StoreAlign : Align(DL.getABITypeAlignment(Val->getType())); + StoreAlign ? *StoreAlign : DL.getABITypeAlign(Val->getType()); if (KnownAlign > EffectiveStoreAlign) SI.setAlignment(KnownAlign); diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp --- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -1277,8 +1277,8 @@ AssumptionCache &AC = LookupAssumptionCache(); DominatorTree &DT = LookupDomTree(); if (MDep->getSourceAlign() < ByValAlign && - getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign->value(), DL, - &CB, &AC, &DT) < ByValAlign->value()) + getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, &AC, + &DT) < ByValAlign) return false; // The address space of the memcpy source must match the byval argument diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp --- a/llvm/lib/Transforms/Utils/InlineFunction.cpp +++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp @@ -1374,8 +1374,8 @@ // If the pointer is already known to be sufficiently aligned, or if we can // round it up to a larger alignment, then we don't need a temporary. - if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >= - ByValAlignment) + if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall, + AC) >= ByValAlignment) return Arg; // Otherwise, we have to make a memcpy to get a safe alignment. 
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -1156,9 +1156,8 @@ /// often possible though. If alignment is important, a more reliable approach /// is to simply align all global variables and allocation instructions to /// their preferred alignment from the beginning. -static unsigned enforceKnownAlignment(Value *V, unsigned Alignment, - unsigned PrefAlign, - const DataLayout &DL) { +static Align enforceKnownAlignment(Value *V, Align Alignment, Align PrefAlign, + const DataLayout &DL) { assert(PrefAlign > Alignment); V = V->stripPointerCasts(); @@ -1170,21 +1169,21 @@ // stripPointerCasts recurses through infinite layers of bitcasts, // while computeKnownBits is not allowed to traverse more than 6 // levels. - Alignment = std::max(AI->getAlignment(), Alignment); + Alignment = max(AI->getAlign(), Alignment); if (PrefAlign <= Alignment) return Alignment; // If the preferred alignment is greater than the natural stack alignment // then don't round up. This avoids dynamic stack realignment. - if (DL.exceedsNaturalStackAlignment(Align(PrefAlign))) + if (DL.exceedsNaturalStackAlignment(PrefAlign)) return Alignment; - AI->setAlignment(Align(PrefAlign)); + AI->setAlignment(PrefAlign); return PrefAlign; } if (auto *GO = dyn_cast(V)) { // TODO: as above, this shouldn't be necessary. 
- Alignment = std::max(GO->getAlignment(), Alignment); + Alignment = max(GO->getAlign(), Alignment); if (PrefAlign <= Alignment) return Alignment; @@ -1195,18 +1194,18 @@ if (!GO->canIncreaseAlignment()) return Alignment; - GO->setAlignment(Align(PrefAlign)); + GO->setAlignment(PrefAlign); return PrefAlign; } return Alignment; } -unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, - const DataLayout &DL, - const Instruction *CxtI, - AssumptionCache *AC, - const DominatorTree *DT) { +Align llvm::getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, + const DataLayout &DL, + const Instruction *CxtI, + AssumptionCache *AC, + const DominatorTree *DT) { assert(V->getType()->isPointerTy() && "getOrEnforceKnownAlignment expects a pointer!"); @@ -1218,13 +1217,13 @@ // LLVM doesn't support alignments larger than (1 << MaxAlignmentExponent). TrailZ = std::min(TrailZ, +Value::MaxAlignmentExponent); - unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ); + Align Alignment = Align(1u << std::min(Known.getBitWidth() - 1, TrailZ)); - if (PrefAlign > Align) - Align = enforceKnownAlignment(V, Align, PrefAlign, DL); + if (PrefAlign && *PrefAlign > Alignment) + Alignment = enforceKnownAlignment(V, Alignment, *PrefAlign, DL); // We don't need to make any adjustment. 
- return Align; + return Alignment; } ///===---------------------------------------------------------------------===// diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp --- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@ -1015,11 +1015,11 @@ vectorizeStoreChain(Chains.second, InstructionsProcessed); } - unsigned NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(), - StackAdjustedAlignment, - DL, S0, nullptr, &DT); - if (NewAlign >= Alignment.value()) - Alignment = Align(NewAlign); + Align NewAlign = getOrEnforceKnownAlignment(S0->getPointerOperand(), + Align(StackAdjustedAlignment), + DL, S0, nullptr, &DT); + if (NewAlign >= Alignment) + Alignment = NewAlign; else return false; } @@ -1160,10 +1160,11 @@ vectorizeLoadChain(Chains.second, InstructionsProcessed); } - unsigned NewAlign = getOrEnforceKnownAlignment( - L0->getPointerOperand(), StackAdjustedAlignment, DL, L0, nullptr, &DT); - if (NewAlign >= Alignment.value()) - Alignment = Align(NewAlign); + Align NewAlign = getOrEnforceKnownAlignment(L0->getPointerOperand(), + Align(StackAdjustedAlignment), + DL, L0, nullptr, &DT); + if (NewAlign >= Alignment) + Alignment = NewAlign; else return false; }