diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -180,23 +180,23 @@
   LoadInst *cloneImpl() const;
 
 public:
-  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr = "",
-           Instruction *InsertBefore = nullptr);
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
+           Instruction *InsertBefore);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
            BasicBlock *InsertAtEnd);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
-           Instruction *InsertBefore = nullptr);
+           Instruction *InsertBefore);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
            BasicBlock *InsertAtEnd);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
-           MaybeAlign Align, Instruction *InsertBefore = nullptr);
+           Align Align, Instruction *InsertBefore = nullptr);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
-           MaybeAlign Align, BasicBlock *InsertAtEnd);
+           Align Align, BasicBlock *InsertAtEnd);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
-           MaybeAlign Align, AtomicOrdering Order,
+           Align Align, AtomicOrdering Order,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
-           MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
+           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);
 
   /// Return true if this is a load from a volatile memory location.
@@ -211,18 +211,14 @@
   /// Return the alignment of the access that is being performed.
   /// FIXME: Remove this function once transition to Align is over.
   /// Use getAlign() instead.
-  unsigned getAlignment() const {
-    if (const auto MA = getAlign())
-      return MA->value();
-    return 0;
-  }
+  unsigned getAlignment() const { return getAlign().value(); }
 
   /// Return the alignment of the access that is being performed.
-  MaybeAlign getAlign() const {
-    return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
+  Align getAlign() const {
+    return *decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
   }
 
-  void setAlignment(MaybeAlign Alignment);
+  void setAlignment(Align Alignment);
 
   /// Returns the ordering constraint of this load instruction.
   AtomicOrdering getOrdering() const {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4563,7 +4563,7 @@
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
-      *I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
+      I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
 
   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -2015,7 +2015,7 @@
   else if (AllocaInst *AI = dyn_cast<AllocaInst>(P))
     AI->setAlignment(MaybeAlign(Bytes));
   else if (LoadInst *LI = dyn_cast<LoadInst>(P))
-    LI->setAlignment(MaybeAlign(Bytes));
+    LI->setAlignment(Align(Bytes));
   else if (StoreInst *SI = dyn_cast<StoreInst>(P))
     SI->setAlignment(MaybeAlign(Bytes));
   else
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1326,6 +1326,15 @@
          "Alignment required for atomic load");
 }
 
+Align computeLoadAlign(Type *Ty, BasicBlock *BB) {
+  const DataLayout &DL = BB->getModule()->getDataLayout();
+  return DL.getABITypeAlign(Ty);
+}
+
+Align computeLoadAlign(Type *Ty, Instruction *I) {
+  return computeLoadAlign(Ty, I->getParent());
+}
+
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                    Instruction *InsertBef)
     : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
@@ -1336,36 +1345,38 @@
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    Instruction *InsertBef)
-    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertBef) {}
+    : LoadInst(Ty, Ptr, Name, isVolatile, computeLoadAlign(Ty, InsertBef),
+               InsertBef) {}
 
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    BasicBlock *InsertAE)
-    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertAE) {}
+    : LoadInst(Ty, Ptr, Name, isVolatile, computeLoadAlign(Ty, InsertAE),
+               InsertAE) {}
 
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
-                   MaybeAlign Align, Instruction *InsertBef)
+                   Align Align, Instruction *InsertBef)
     : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBef) {}
 
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
-                   MaybeAlign Align, BasicBlock *InsertAE)
+                   Align Align, BasicBlock *InsertAE)
     : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAE) {}
 
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
-                   MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
+                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                    Instruction *InsertBef)
     : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
   assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
   setVolatile(isVolatile);
-  setAlignment(MaybeAlign(Align));
+  setAlignment(Align);
   setAtomic(Order, SSID);
   AssertOK();
   setName(Name);
 }
 
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
-                   MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
+                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                    BasicBlock *InsertAE)
     : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
   assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
@@ -1376,8 +1387,8 @@
   setName(Name);
 }
 
-void LoadInst::setAlignment(MaybeAlign Align) {
-  assert((!Align || *Align <= MaximumAlignment) &&
+void LoadInst::setAlignment(Align Align) {
+  assert(Align <= MaximumAlignment &&
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                              (encode(Align) << 1));
@@ -4233,8 +4244,7 @@
 LoadInst *LoadInst::cloneImpl() const {
   return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
-                      MaybeAlign(getAlignment()), getOrdering(),
-                      getSyncScopeID());
+                      getAlign(), getOrdering(), getSyncScopeID());
 }
 
 StoreInst *StoreInst::cloneImpl() const {
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -113,7 +113,7 @@
     createMemCpyLoopKnownSize(/* ConvertedInst */ SI,
                               /* SrcAddr */ SrcAddr, /* DstAddr */ DstAddr,
                               /* CopyLen */ CopyLen,
-                              /* SrcAlign */ LI->getAlign().valueOrOne(),
+                              /* SrcAlign */ LI->getAlign(),
                               /* DestAlign */ SI->getAlign().valueOrOne(),
                               /* SrcIsVolatile */ LI->isVolatile(),
                               /* DstIsVolatile */ SI->isVolatile(), TTI);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -194,8 +194,7 @@
   Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
   LoadInst *L = Builder.CreateLoad(IntType, Src);
   // Alignment from the mem intrinsic will be better, so use it.
-  L->setAlignment(
-      MaybeAlign(CopySrcAlign)); // FIXME: Check if we can use Align instead.
+  L->setAlignment(*CopySrcAlign);
   if (CopyMD)
     L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
   MDNode *LoopMemParallelMD =
@@ -2465,7 +2464,7 @@
                                    &DT) >= 16) {
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(II->getType()));
-      return new LoadInst(II->getType(), Ptr);
+      return new LoadInst(II->getType(), Ptr, "", false, Align(16));
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
@@ -2512,7 +2511,7 @@
                                    &DT) >= 32) {
      Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(II->getType()));
-      return new LoadInst(II->getType(), Ptr);
+      return new LoadInst(II->getType(), Ptr, "", false, Align(32));
    }
    break;
  case Intrinsic::ppc_qpx_qvstfs:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -281,7 +281,8 @@
     if (auto *LT = dyn_cast<LoadInst>(I)) {
       auto *V = getReplacement(LT->getPointerOperand());
       assert(V && "Operand not replaced");
-      auto *NewI = new LoadInst(I->getType(), V);
+      auto *NewI = new LoadInst(I->getType(), V, "", false,
+                                IC.getDataLayout().getABITypeAlign(I->getType()));
       NewI->takeName(LT);
       IC.InsertNewInstWith(NewI, *LT);
       IC.replaceInstUsesWith(*LT, NewI);
@@ -1008,7 +1009,7 @@
   //
   if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
     // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
-    const MaybeAlign Alignment(LI.getAlignment());
+    Align Alignment = LI.getAlign();
     if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                     Alignment, DL, SI) &&
         isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
diff --git a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -554,7 +554,7 @@
   // visitLoadInst will propagate an alignment onto the load when TD is around,
   // and if TD isn't around, we can't handle the mixed case.
   bool isVolatile = FirstLI->isVolatile();
-  MaybeAlign LoadAlignment(FirstLI->getAlignment());
+  Align LoadAlignment = FirstLI->getAlign();
   unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
 
   // We can't sink the load if the loaded value could be modified between the
@@ -584,12 +584,7 @@
         !isSafeAndProfitableToSinkLoad(LI))
       return nullptr;
 
-    // If some of the loads have an alignment specified but not all of them,
-    // we can't do the transformation.
-    if ((LoadAlignment.hasValue()) != (LI->getAlignment() != 0))
-      return nullptr;
-
-    LoadAlignment = std::min(LoadAlignment, MaybeAlign(LI->getAlignment()));
+    LoadAlignment = std::min(LoadAlignment, Align(LI->getAlign()));
 
     // If the PHI is of volatile loads and the load block has multiple
     // successors, sinking it would remove a load of the volatile value from
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -320,21 +320,24 @@
     WorkList.push_back(K);
   }
 
+  const DataLayout &DL = SE->getDataLayout();
   while (!WorkList.empty()) {
     Instruction *J = WorkList.pop_back_val();
 
     if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
       Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                            LI->getPointerOperand(), SE);
-
-      if (NewAlignment > *LI->getAlign()) {
+      Align OldAlignment =
+          DL.getValueOrABITypeAlignment(LI->getAlign(), LI->getType());
+      if (NewAlignment > OldAlignment) {
         LI->setAlignment(NewAlignment);
         ++NumLoadAlignChanged;
       }
     } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
       Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                            SI->getPointerOperand(), SE);
-
-      if (NewAlignment > *SI->getAlign()) {
+      Align OldAlignment = DL.getValueOrABITypeAlignment(
+          SI->getAlign(), SI->getOperand(0)->getType());
+      if (NewAlignment > OldAlignment) {
         SI->setAlignment(NewAlignment);
         ++NumStoreAlignChanged;
       }
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -890,8 +890,8 @@
   void updateAlignment(Instruction *I, Instruction *Repl) {
     if (auto *ReplacementLoad = dyn_cast<LoadInst>(Repl)) {
-      ReplacementLoad->setAlignment(MaybeAlign(std::min(
-          ReplacementLoad->getAlignment(), cast<LoadInst>(I)->getAlignment())));
+      ReplacementLoad->setAlignment(
+          std::min(ReplacementLoad->getAlign(), cast<LoadInst>(I)->getAlign()));
       ++NumLoadsRemoved;
     } else if (auto *ReplacementStore = dyn_cast<StoreInst>(Repl)) {
       ReplacementStore->setAlignment(