diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -1668,16 +1668,19 @@
   /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
   /// converting the string to 'bool' for the isVolatile parameter.
   LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
-    return Insert(new LoadInst(Ty, Ptr), Name);
+    const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+    return CreateAlignedLoad(Ty, Ptr, DL.getABITypeAlign(Ty), Name);
   }
   LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
-    return Insert(new LoadInst(Ty, Ptr), Name);
+    const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+    return CreateAlignedLoad(Ty, Ptr, DL.getABITypeAlign(Ty), Name);
   }
   LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
                        const Twine &Name = "") {
-    return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile), Name);
+    const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+    return CreateAlignedLoad(Ty, Ptr, DL.getABITypeAlign(Ty), isVolatile, Name);
   }
   // Deprecated [opaque pointer types]
@@ -1708,9 +1711,11 @@
   }
   LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                               const char *Name) {
-    LoadInst *LI = CreateLoad(Ty, Ptr, Name);
-    LI->setAlignment(Align);
-    return LI;
+    if (!Align) {
+      const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+      Align = DL.getABITypeAlign(Ty);
+    }
+    return Insert(new LoadInst(Ty, Ptr, Twine(), false, *Align), Name);
   }
   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
@@ -1721,9 +1726,11 @@
   }
   LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                               const Twine &Name = "") {
-    LoadInst *LI = CreateLoad(Ty, Ptr, Name);
-    LI->setAlignment(Align);
-    return LI;
+    if (!Align) {
+      const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+      Align = DL.getABITypeAlign(Ty);
+    }
+    return Insert(new LoadInst(Ty, Ptr, Twine(), false, *Align), Name);
   }
   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
@@ -1735,9 +1742,11 @@
   }
   LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                               bool isVolatile, const Twine &Name = "") {
-    LoadInst *LI = CreateLoad(Ty, Ptr, isVolatile, Name);
-    LI->setAlignment(Align);
-    return LI;
+    if (!Align) {
+      const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+      Align = DL.getABITypeAlign(Ty);
+    }
+    return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
   }
   // Deprecated [opaque pointer types]
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -180,23 +180,23 @@
   LoadInst *cloneImpl() const;
 public:
-  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr = "",
-           Instruction *InsertBefore = nullptr);
+  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
+           Instruction *InsertBefore);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
-           Instruction *InsertBefore = nullptr);
+           Instruction *InsertBefore);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
            BasicBlock *InsertAtEnd);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
-           MaybeAlign Align, Instruction *InsertBefore = nullptr);
+           Align Align, Instruction *InsertBefore = nullptr);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
-           MaybeAlign Align, BasicBlock *InsertAtEnd);
+           Align Align, BasicBlock *InsertAtEnd);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
-           MaybeAlign Align, AtomicOrdering Order,
+           Align Align, AtomicOrdering Order,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
   LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
-           MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
+           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);
   /// Return true if this is a load from a volatile memory location.
@@ -212,17 +212,15 @@
   /// FIXME: Remove this function once transition to Align is over.
   /// Use getAlign() instead.
   unsigned getAlignment() const {
-    if (const auto MA = getAlign())
-      return MA->value();
-    return 0;
+    return getAlign().value();
   }
   /// Return the alignment of the access that is being performed.
-  MaybeAlign getAlign() const {
-    return decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
+  Align getAlign() const {
+    return *decodeMaybeAlign((getSubclassDataFromInstruction() >> 1) & 31);
   }
-  void setAlignment(MaybeAlign Alignment);
+  void setAlignment(Align Alignment);
   /// Returns the ordering constraint of this load instruction.
   AtomicOrdering getOrdering() const {
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -6998,7 +6998,10 @@
     return Error(ExplicitTypeLoc,
                  "explicit pointee type doesn't match operand's pointee type");
-  Inst = new LoadInst(Ty, Val, "", isVolatile, Alignment, Ordering, SSID);
+  if (!Alignment)
+    Alignment = M->getDataLayout().getABITypeAlign(Ty);
+
+  Inst = new LoadInst(Ty, Val, "", isVolatile, *Alignment, Ordering, SSID);
   return AteExtraComma ? InstExtraComma : InstNormal;
 }
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -4824,7 +4824,11 @@
     MaybeAlign Align;
     if (Error Err = parseAlignmentValue(Record[OpNum], Align))
       return Err;
-    I = new LoadInst(Ty, Op, "", Record[OpNum + 1], Align);
+    if (!Align) {
+      const DataLayout &DL = TheModule->getDataLayout();
+      Align = DL.getABITypeAlign(Ty);
+    }
+    I = new LoadInst(Ty, Op, "", Record[OpNum + 1], *Align);
     InstructionList.push_back(I);
     break;
   }
@@ -4861,7 +4865,11 @@
     MaybeAlign Align;
     if (Error Err = parseAlignmentValue(Record[OpNum], Align))
       return Err;
-    I = new LoadInst(Ty, Op, "", Record[OpNum + 1], Align, Ordering, SSID);
+    if (!Align) {
+      const DataLayout &DL = TheModule->getDataLayout();
+      Align = DL.getABITypeAlign(Ty);
+    }
+    I = new LoadInst(Ty, Op, "", Record[OpNum + 1], *Align, Ordering, SSID);
     InstructionList.push_back(I);
     break;
   }
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -383,7 +383,7 @@
   Value *NewAddr = Builder.CreateBitCast(Addr, PT);
   auto *NewLI = Builder.CreateLoad(NewTy, NewAddr);
-  NewLI->setAlignment(MaybeAlign(LI->getAlignment()));
+  NewLI->setAlignment(LI->getAlign());
   NewLI->setVolatile(LI->isVolatile());
   NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
   LLVM_DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");
@@ -1377,7 +1377,7 @@
   Builder.SetInsertPoint(BB);
   LoadInst *InitLoaded = Builder.CreateLoad(ResultTy, Addr);
   // Atomics require at least natural alignment.
-  InitLoaded->setAlignment(MaybeAlign(ResultTy->getPrimitiveSizeInBits() / 8));
+  InitLoaded->setAlignment(Align(ResultTy->getPrimitiveSizeInBits() / 8));
   Builder.CreateBr(LoopBB);
   // Start the main loop block now that we've taken care of the preliminaries.
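Every creation site touched above applies the same fallback: keep an explicit alignment when one was given, otherwise use the ABI alignment the module's DataLayout assigns to the loaded type. A minimal sketch of that rule follows, for reference only; the helper name resolveLoadAlign is hypothetical and is not something this patch adds.

#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Hypothetical helper, shown only to illustrate the fallback the patch
// repeats inline at each LoadInst creation site.
static Align resolveLoadAlign(const DataLayout &DL, MaybeAlign Specified,
                              Type *Ty) {
  // An explicit alignment wins; a missing one becomes the type's ABI
  // alignment from the DataLayout.
  return Specified ? *Specified : DL.getABITypeAlign(Ty);
}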
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -248,8 +248,7 @@
     return SI->getAlign().getValueOr(DL->getABITypeAlign(ValTy));
   }
   if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
-    Type *ValTy = LI->getType();
-    return LI->getAlign().getValueOr(DL->getABITypeAlign(ValTy));
+    return DL->getValueOrABITypeAlignment(LI->getAlign(), LI->getType());
   }
   if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
     // TODO(PR27168): This instruction has no alignment attribute, but unlike
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4569,8 +4569,8 @@
   MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
       MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
-      I.getAlign().getValueOr(DAG.getEVTAlign(MemVT)), AAMDNodes(), nullptr,
-      SSID, Order);
+      DL->getValueOrABITypeAlignment(I.getAlign(), I.getType()), AAMDNodes(),
+      nullptr, SSID, Order);
   InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -2007,13 +2007,13 @@
 void LLVMSetAlignment(LLVMValueRef V, unsigned Bytes) {
   Value *P = unwrap<Value>(V);
   if (GlobalObject *GV = dyn_cast<GlobalObject>(P))
-    GV->setAlignment(MaybeAlign(Bytes));
+    GV->setAlignment(Align(Bytes));
   else if (AllocaInst *AI = dyn_cast<AllocaInst>(P))
-    AI->setAlignment(MaybeAlign(Bytes));
+    AI->setAlignment(Align(Bytes));
   else if (LoadInst *LI = dyn_cast<LoadInst>(P))
-    LI->setAlignment(MaybeAlign(Bytes));
+    LI->setAlignment(Align(Bytes));
   else if (StoreInst *SI = dyn_cast<StoreInst>(P))
-    SI->setAlignment(MaybeAlign(Bytes));
+    SI->setAlignment(Align(Bytes));
   else
     llvm_unreachable(
         "only GlobalValue, AllocaInst, LoadInst and StoreInst have alignment");
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1340,6 +1340,15 @@
          "Alignment required for atomic load");
 }
+Align computeLoadAlign(Type *Ty, BasicBlock *BB) {
+  const DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+  return DL.getABITypeAlign(Ty);
+}
+
+Align computeLoadAlign(Type *Ty, Instruction *I) {
+  return computeLoadAlign(Ty, I->getParent());
+}
+
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                    Instruction *InsertBef)
     : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
@@ -1350,36 +1359,36 @@
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    Instruction *InsertBef)
-    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertBef) {}
+    : LoadInst(Ty, Ptr, Name, isVolatile, computeLoadAlign(Ty, InsertBef), InsertBef) {}
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                    BasicBlock *InsertAE)
-    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/None, InsertAE) {}
+    : LoadInst(Ty, Ptr, Name, isVolatile, computeLoadAlign(Ty, InsertAE), InsertAE) {}
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
-                   MaybeAlign Align, Instruction *InsertBef)
+                   Align Align, Instruction *InsertBef)
     : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBef) {}
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
-                   MaybeAlign Align, BasicBlock *InsertAE)
+                   Align Align, BasicBlock *InsertAE)
     : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAE) {}
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
-                   MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
+                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                    Instruction *InsertBef)
     : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
   assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
   setVolatile(isVolatile);
-  setAlignment(MaybeAlign(Align));
+  setAlignment(Align);
   setAtomic(Order, SSID);
   AssertOK();
   setName(Name);
 }
 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
-                   MaybeAlign Align, AtomicOrdering Order, SyncScope::ID SSID,
+                   Align Align, AtomicOrdering Order, SyncScope::ID SSID,
                    BasicBlock *InsertAE)
     : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
   assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
@@ -1390,8 +1399,8 @@
   setName(Name);
 }
-void LoadInst::setAlignment(MaybeAlign Align) {
-  assert((!Align || *Align <= MaximumAlignment) &&
+void LoadInst::setAlignment(Align Align) {
+  assert(Align <= MaximumAlignment &&
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                              (encode(Align) << 1));
@@ -4244,8 +4253,7 @@
 LoadInst *LoadInst::cloneImpl() const {
   return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
-                      MaybeAlign(getAlignment()), getOrdering(),
-                      getSyncScopeID());
+                      getAlign(), getOrdering(), getSyncScopeID());
 }
 StoreInst *StoreInst::cloneImpl() const {
diff --git a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
--- a/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
+++ b/llvm/lib/Target/BPF/BPFAbstractMemberAccess.cpp
@@ -878,8 +878,8 @@
   if (CInfo.Kind == BPFPreserveFieldInfoAI) {
     // Load the global variable which represents the returned field info.
-    auto *LDInst = new LoadInst(Type::getInt32Ty(BB->getContext()), GV);
-    BB->getInstList().insert(Call->getIterator(), LDInst);
+    auto *LDInst = new LoadInst(Type::getInt32Ty(BB->getContext()), GV, "",
+                                Call);
     Call->replaceAllUsesWith(LDInst);
     Call->eraseFromParent();
     return true;
@@ -896,8 +896,7 @@
   // The original Call inst is removed.
   // Load the global variable.
-  auto *LDInst = new LoadInst(Type::getInt64Ty(BB->getContext()), GV);
-  BB->getInstList().insert(Call->getIterator(), LDInst);
+  auto *LDInst = new LoadInst(Type::getInt64Ty(BB->getContext()), GV, "", Call);
   // Generate a BitCast
   auto *BCInst = new BitCastInst(Base, Type::getInt8PtrTy(BB->getContext()));
diff --git a/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -110,13 +110,14 @@
     ConstantInt *CopyLen =
         ConstantInt::get(Type::getInt32Ty(Context), NumLoads);
-    createMemCpyLoopKnownSize(/* ConvertedInst */ SI,
-                              /* SrcAddr */ SrcAddr, /* DstAddr */ DstAddr,
-                              /* CopyLen */ CopyLen,
-                              /* SrcAlign */ LI->getAlign().valueOrOne(),
-                              /* DestAlign */ SI->getAlign().valueOrOne(),
-                              /* SrcIsVolatile */ LI->isVolatile(),
-                              /* DstIsVolatile */ SI->isVolatile(), TTI);
+    createMemCpyLoopKnownSize(
+        /* ConvertedInst */ SI,
+        /* SrcAddr */ SrcAddr, /* DstAddr */ DstAddr,
+        /* CopyLen */ CopyLen,
+        /* SrcAlign */ MaybeAlign(LI->getAlign()).valueOrOne(),
+        /* DestAlign */ SI->getAlign().valueOrOne(),
+        /* SrcIsVolatile */ LI->isVolatile(),
+        /* DstIsVolatile */ SI->isVolatile(), TTI);
     SI->eraseFromParent();
     LI->eraseFromParent();
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -305,7 +305,7 @@
           // of the previous load.
           LoadInst *newLoad =
               IRB.CreateLoad(OrigLoad->getType(), V, V->getName() + ".val");
-          newLoad->setAlignment(MaybeAlign(OrigLoad->getAlignment()));
+          newLoad->setAlignment(OrigLoad->getAlign());
           // Transfer the AA info too.
           AAMDNodes AAInfo;
           OrigLoad->getAAMetadata(AAInfo);
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -5644,7 +5644,7 @@
             constructPointer(PointeeTy->getPointerTo(), Base,
                              PrivStructLayout->getElementOffset(u), IRB, DL);
         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
-        L->setAlignment(MaybeAlign(1));
+        L->setAlignment(Align(1));
        ReplacementValues.push_back(L);
       }
     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
@@ -5655,12 +5655,12 @@
         Value *Ptr =
             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
-        L->setAlignment(MaybeAlign(1));
+        L->setAlignment(Align(1));
         ReplacementValues.push_back(L);
       }
     } else {
       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
-      L->setAlignment(MaybeAlign(1));
+      L->setAlignment(Align(1));
       ReplacementValues.push_back(L);
     }
   }
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -919,7 +919,7 @@
       // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
       Value *LV = new LoadInst(InitBool->getValueType(), InitBool,
-                               InitBool->getName() + ".val", false, None,
+                               InitBool->getName() + ".val", false, Align(1),
                                LI->getOrdering(), LI->getSyncScopeID(),
                                LI->isUnordered() ? (Instruction *)ICI : LI);
       InitBoolUsed = true;
@@ -1726,7 +1726,7 @@
         assert(LI->getOperand(0) == GV && "Not a copy!");
         // Insert a new load, to preserve the saved value.
         StoreVal = new LoadInst(NewGV->getValueType(), NewGV,
-                                LI->getName() + ".b", false, None,
+                                LI->getName() + ".b", false, Align(1),
                                 LI->getOrdering(), LI->getSyncScopeID(), LI);
       } else {
         assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
@@ -1743,7 +1743,7 @@
       // Change the load into a load of bool then a select.
       LoadInst *LI = cast<LoadInst>(UI);
       LoadInst *NLI = new LoadInst(NewGV->getValueType(), NewGV,
-                                   LI->getName() + ".b", false, None,
+                                   LI->getName() + ".b", false, Align(1),
                                    LI->getOrdering(), LI->getSyncScopeID(), LI);
       Instruction *NSI;
       if (IsOneZero)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAtomicRMW.cpp
@@ -150,8 +150,8 @@
       Ordering != AtomicOrdering::Monotonic)
     return nullptr;
-  LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand());
-  Load->setAtomic(Ordering, RMWI.getSyncScopeID());
-  Load->setAlignment(MaybeAlign(DL.getABITypeAlignment(RMWI.getType())));
+  LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand(), "",
+                                false, DL.getABITypeAlign(RMWI.getType()),
+                                Ordering, RMWI.getSyncScopeID());
   return Load;
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -194,8 +194,7 @@
   Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
   LoadInst *L = Builder.CreateLoad(IntType, Src);
   // Alignment from the mem intrinsic will be better, so use it.
-  L->setAlignment(
-      MaybeAlign(CopySrcAlign)); // FIXME: Check if we can use Align instead.
+  L->setAlignment(Align(CopySrcAlign));
   if (CopyMD)
     L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
   MDNode *LoopMemParallelMD =
@@ -2324,8 +2323,8 @@
     if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc0)))) &&
         match(II->getArgOperand(1), m_OneUse(m_FPExt(m_Value(ExtSrc1)))) &&
         ExtSrc0->getType() == ExtSrc1->getType()) {
-      Value *F = Intrinsic::getDeclaration(II->getModule(), II->getIntrinsicID(),
-                                           { ExtSrc0->getType() });
+      Function *F = Intrinsic::getDeclaration(
+          II->getModule(), II->getIntrinsicID(), {ExtSrc0->getType()});
       CallInst *NewCall = Builder.CreateCall(F, { ExtSrc0, ExtSrc1 });
       NewCall->copyFastMathFlags(II);
       NewCall->takeName(II);
@@ -2473,7 +2472,7 @@
                                    &DT) >= 16) {
       Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
-      return new LoadInst(II->getType(), Ptr);
+      return new LoadInst(II->getType(), Ptr, "", false, Align(16));
     }
     break;
   case Intrinsic::ppc_vsx_lxvw4x:
@@ -2519,7 +2518,7 @@
                                    &DT) >= 32) {
       Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
-      return new LoadInst(II->getType(), Ptr);
+      return new LoadInst(II->getType(), Ptr, "", false, Align(32));
     }
     break;
   case Intrinsic::ppc_qpx_qvstfs:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -295,7 +295,8 @@
     if (auto *LT = dyn_cast<LoadInst>(I)) {
       auto *V = getReplacement(LT->getPointerOperand());
       assert(V && "Operand not replaced");
-      auto *NewI = new LoadInst(I->getType(), V);
+      auto *NewI = new LoadInst(I->getType(), V, "", false,
+                                IC.getDataLayout().getABITypeAlign(I->getType()));
       NewI->takeName(LT);
       IC.InsertNewInstWith(NewI, *LT);
       IC.replaceInstUsesWith(*LT, NewI);
@@ -964,9 +965,9 @@
       LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
   if (KnownAlign > EffectiveLoadAlign)
-    LI.setAlignment(MaybeAlign(KnownAlign));
+    LI.setAlignment(Align(KnownAlign));
   else if (LoadAlign == 0)
-    LI.setAlignment(MaybeAlign(EffectiveLoadAlign));
+    LI.setAlignment(Align(EffectiveLoadAlign));
   // Replace GEP indices if possible.
   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
@@ -1023,7 +1024,7 @@
   //
   if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
     // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
-    const MaybeAlign Alignment(LI.getAlignment());
+    auto Alignment(LI.getAlign());
     if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
                                     Alignment, DL, SI) &&
         isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
diff --git a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -554,7 +554,9 @@
   // visitLoadInst will propagate an alignment onto the load when TD is around,
   // and if TD isn't around, we can't handle the mixed case.
   bool isVolatile = FirstLI->isVolatile();
-  MaybeAlign LoadAlignment(FirstLI->getAlignment());
+  if (!FirstLI->getAlignment())
+    return nullptr;
+  Align LoadAlignment(FirstLI->getAlignment());
   unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
   // We can't sink the load if the loaded value could be modified between the
@@ -586,10 +588,10 @@
     // If some of the loads have an alignment specified but not all of them,
     // we can't do the transformation.
-    if ((LoadAlignment.hasValue()) != (LI->getAlignment() != 0))
+    if (!LI->getAlignment())
       return nullptr;
-    LoadAlignment = std::min(LoadAlignment, MaybeAlign(LI->getAlignment()));
+    LoadAlignment = std::min(LoadAlignment, Align(LI->getAlignment()));
     // If the PHI is of volatile loads and the load block has multiple
     // successors, sinking it would remove a load of the volatile value from
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1238,7 +1238,7 @@
     }
   }
-  const MaybeAlign ShadowAlign(Align * DFS.ShadowWidth / 8);
+  const llvm::Align ShadowAlign(Align * DFS.ShadowWidth / 8);
   SmallVector<const Value *, 2> Objs;
   GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
   bool AllConstants = true;
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -327,7 +327,7 @@
                                    LI->getPointerOperand(), SE);
       if (NewAlignment > LI->getAlignment()) {
-        LI->setAlignment(MaybeAlign(NewAlignment));
+        LI->setAlignment(Align(NewAlignment));
         ++NumLoadAlignChanged;
       }
     } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -1266,7 +1266,7 @@
   auto *NewLoad = new LoadInst(
       LI->getType(), LoadPtr, LI->getName() + ".pre", LI->isVolatile(),
-      MaybeAlign(LI->getAlignment()), LI->getOrdering(), LI->getSyncScopeID(),
+      LI->getAlign(), LI->getOrdering(), LI->getSyncScopeID(),
       UnavailablePred->getTerminator());
   NewLoad->setDebugLoc(LI->getDebugLoc());
diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -890,8 +890,8 @@
   void updateAlignment(Instruction *I, Instruction *Repl) {
     if (auto *ReplacementLoad = dyn_cast<LoadInst>(Repl)) {
-      ReplacementLoad->setAlignment(MaybeAlign(std::min(
-          ReplacementLoad->getAlignment(), cast<LoadInst>(I)->getAlignment())));
+      ReplacementLoad->setAlignment(std::min(
+          ReplacementLoad->getAlign(), cast<LoadInst>(I)->getAlign()));
       ++NumLoadsRemoved;
     } else if (auto *ReplacementStore = dyn_cast<StoreInst>(Repl)) {
       ReplacementStore->setAlignment(
diff --git a/llvm/lib/Transforms/Scalar/JumpThreading.cpp b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
--- a/llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1449,7 +1449,7 @@
            "Can't handle critical edge here!");
     LoadInst *NewVal = new LoadInst(
         LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
-        LoadI->getName() + ".pr", false, MaybeAlign(LoadI->getAlignment()),
+        LoadI->getName() + ".pr", false, LoadI->getAlign(),
         LoadI->getOrdering(), LoadI->getSyncScopeID(),
         UnavailablePred->getTerminator());
     NewVal->setDebugLoc(LoadI->getDebugLoc());
diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp
--- a/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -2084,7 +2084,7 @@
       SomePtr->getName() + ".promoted", Preheader->getTerminator());
   if (SawUnorderedAtomic)
     PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
-  PreheaderLoad->setAlignment(MaybeAlign(Alignment));
+  PreheaderLoad->setAlignment(Align(Alignment));
   PreheaderLoad->setDebugLoc(DL);
   if (AATags)
     PreheaderLoad->setAAMetadata(AATags);
diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -436,8 +436,7 @@
                                   PH->getTerminator());
   Value *Initial = new LoadInst(
       Cand.Load->getType(), InitialPtr, "load_initial",
-      /* isVolatile */ false, MaybeAlign(Cand.Load->getAlignment()),
-      PH->getTerminator());
+      /* isVolatile */ false, Cand.Load->getAlign(), PH->getTerminator());
   PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
                                  &L->getHeader()->front());
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1275,7 +1275,7 @@
   // matter which one we get and if any differ.
   AAMDNodes AATags;
   SomeLoad->getAAMetadata(AATags);
-  const MaybeAlign Align = MaybeAlign(SomeLoad->getAlignment());
+  auto Align = SomeLoad->getAlign();
   // Rewrite all loads of the PN to use the new PHI.
   while (!PN.use_empty()) {
@@ -1373,8 +1373,8 @@
   NumLoadsSpeculated += 2;
   // Transfer alignment and AA info if present.
-  TL->setAlignment(MaybeAlign(LI->getAlignment()));
-  FL->setAlignment(MaybeAlign(LI->getAlignment()));
+  TL->setAlignment(LI->getAlign());
+  FL->setAlignment(LI->getAlign());
   AAMDNodes Tags;
   LI->getAAMetadata(Tags);
@@ -2445,14 +2445,10 @@
   ///
   /// You can optionally pass a type to this routine and if that type's ABI
   /// alignment is itself suitable, this will return zero.
-  MaybeAlign getSliceAlign(Type *Ty = nullptr) {
-    const MaybeAlign NewAIAlign = DL.getValueOrABITypeAlignment(
+  Align getSliceAlign() {
+    Align NewAIAlign = DL.getValueOrABITypeAlignment(
         MaybeAlign(NewAI.getAlignment()), NewAI.getAllocatedType());
-    const MaybeAlign Align =
-        commonAlignment(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
-    return (Ty && Align && Align->value() == DL.getABITypeAlignment(Ty))
-               ? None
-               : Align;
+    return commonAlignment(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
   }
   unsigned getIndex(uint64_t Offset) {
@@ -2569,7 +2565,7 @@
     } else {
       Type *LTy = TargetTy->getPointerTo(AS);
       LoadInst *NewLI = IRB.CreateAlignedLoad(
-          TargetTy, getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(TargetTy),
+          TargetTy, getNewAllocaSlicePtr(IRB, LTy), getSliceAlign(),
           LI.isVolatile(), LI.getName());
       if (AATags)
         NewLI->setAAMetadata(AATags);
@@ -2596,7 +2592,8 @@
       // the computed value, and then replace the placeholder with LI, leaving
      // LI only used for this computation.
       Value *Placeholder = new LoadInst(
-          LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)));
+          LI.getType(), UndefValue::get(LI.getType()->getPointerTo(AS)), "",
+          false, Align(1));
       V = insertInteger(DL, IRB, Placeholder, V, NewBeginOffset - BeginOffset,
                         "insert");
       LI.replaceAllUsesWith(V);
@@ -2721,7 +2718,7 @@
     } else {
       unsigned AS = SI.getPointerAddressSpace();
       Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo(AS));
-      NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
+      NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(),
                                      SI.isVolatile());
     }
     NewSI->copyMetadata(SI, {LLVMContext::MD_mem_parallel_loop_access,
@@ -3140,14 +3137,14 @@
       Instruction *I = Uses.pop_back_val();
       if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
-        MaybeAlign LoadAlign = DL.getValueOrABITypeAlignment(
-            MaybeAlign(LI->getAlignment()), LI->getType());
+        Align LoadAlign = DL.getValueOrABITypeAlignment(LI->getAlign(),
+                                                        LI->getType());
         LI->setAlignment(std::min(LoadAlign, getSliceAlign()));
         continue;
       }
       if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
         Value *Op = SI->getOperand(0);
-        MaybeAlign StoreAlign = DL.getValueOrABITypeAlignment(
+        Align StoreAlign = DL.getValueOrABITypeAlignment(
             MaybeAlign(SI->getAlignment()), Op->getType());
         SI->setAlignment(std::min(StoreAlign, getSliceAlign()));
         continue;
diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
--- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -1169,9 +1169,9 @@
       Output = ReloadOutputs[i];
     }
     LoadInst *load = new LoadInst(outputs[i]->getType(), Output,
-                                  outputs[i]->getName() + ".reload");
+                                  outputs[i]->getName() + ".reload",
+                                  codeReplacer);
     Reloads.push_back(load);
-    codeReplacer->getInstList().push_back(load);
     std::vector<User *> Users(outputs[i]->user_begin(), outputs[i]->user_end());
     for (unsigned u = 0, e = Users.size(); u != e; ++u) {
       Instruction *inst = cast<Instruction>(Users[u]);
diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp
--- a/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -512,7 +512,7 @@
     PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
     LoadInst *NewLoad = Builder.CreateLoad(DestTy, PtrVal);
     NewLoad->takeName(SrcVal);
-    NewLoad->setAlignment(MaybeAlign(SrcVal->getAlignment()));
+    NewLoad->setAlignment(SrcVal->getAlign());
     LLVM_DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
     LLVM_DEBUG(dbgs() << "TO: " << *NewLoad << "\n");
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -4367,11 +4367,9 @@
       if (getTreeEntry(PO))
         ExternalUses.push_back(ExternalUser(PO, cast<User>(VecPtr), 0));
-      MaybeAlign Alignment = MaybeAlign(LI->getAlignment());
-      LI = Builder.CreateLoad(VecTy, VecPtr);
-      if (!Alignment)
-        Alignment = MaybeAlign(DL->getABITypeAlignment(ScalarLoadTy));
-      LI->setAlignment(Alignment);
+      Align Alignment = DL->getValueOrABITypeAlignment(LI->getAlign(),
+                                                       ScalarLoadTy);
+      LI = Builder.CreateAlignedLoad(VecTy, VecPtr, Alignment);
       Value *V = propagateMetadata(LI, E->Scalars);
       if (IsReorder) {
         OrdersType Mask;
diff --git a/polly/lib/CodeGen/IslNodeBuilder.cpp b/polly/lib/CodeGen/IslNodeBuilder.cpp
--- a/polly/lib/CodeGen/IslNodeBuilder.cpp
+++ b/polly/lib/CodeGen/IslNodeBuilder.cpp
@@ -1213,8 +1213,7 @@
   Ptr = Builder.CreatePointerCast(Ptr, Ty->getPointerTo(AS), Name + ".cast");
   PreloadVal = Builder.CreateLoad(Ptr, Name + ".load");
   if (LoadInst *PreloadInst = dyn_cast<LoadInst>(PreloadVal))
-    PreloadInst->setAlignment(
-        MaybeAlign(dyn_cast<LoadInst>(AccInst)->getAlignment()));
+    PreloadInst->setAlignment(cast<LoadInst>(AccInst)->getAlign());
   // TODO: This is only a hot fix for SCoP sequences that use the same load
   // instruction contained and hoisted by one of the SCoPs.
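The net effect, sketched below as a usage example rather than as part of the patch: a load created through IRBuilder without an explicit alignment now carries the DataLayout's ABI alignment for the loaded type instead of reporting alignment 0. Ty, Ptr, and InsertPt are assumed to be supplied by the caller.

#include "llvm/IR/IRBuilder.h"
#include <cassert>
using namespace llvm;

LoadInst *emitDefaultAlignedLoad(Type *Ty, Value *Ptr, Instruction *InsertPt) {
  IRBuilder<> B(InsertPt);
  // No alignment argument: with this change the builder consults the module's
  // DataLayout and attaches the ABI alignment of Ty to the new load.
  LoadInst *LI = B.CreateLoad(Ty, Ptr, "val");
  assert(LI->getAlign() ==
         InsertPt->getModule()->getDataLayout().getABITypeAlign(Ty));
  return LI;
}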