diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -1759,8 +1759,10 @@
   /// parameter.
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
-                              const char *Name) {
+  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
+                                                        unsigned Align,
+                                                        const char *Name),
+                            "Use the version that takes MaybeAlign instead") {
     return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
   }
   LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@@ -1771,8 +1773,10 @@
   }
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
-                              const Twine &Name = "") {
+  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
+                                                        unsigned Align,
+                                                        const Twine &Name = ""),
+                            "Use the version that takes MaybeAlign instead") {
     return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), Name);
   }
   LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@@ -1783,8 +1787,11 @@
   }
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, unsigned Align,
-                              bool isVolatile, const Twine &Name = "") {
+  LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr,
+                                                        unsigned Align,
+                                                        bool isVolatile,
+                                                        const Twine &Name = ""),
+                            "Use the version that takes MaybeAlign instead") {
     return CreateAlignedLoad(Ty, Ptr, MaybeAlign(Align), isVolatile, Name);
   }
   LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
@@ -1797,19 +1804,19 @@
   // Deprecated [opaque pointer types]
   LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
     return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
-                             Align, Name);
+                             MaybeAlign(Align), Name);
   }
   // Deprecated [opaque pointer types]
   LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
                               const Twine &Name = "") {
     return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
-                             Align, Name);
+                             MaybeAlign(Align), Name);
   }
   // Deprecated [opaque pointer types]
   LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
                               const Twine &Name = "") {
     return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
-                             Align, isVolatile, Name);
+                             MaybeAlign(Align), isVolatile, Name);
   }
   // Deprecated [opaque pointer types]
   LoadInst *CreateAlignedLoad(Value *Ptr, MaybeAlign Align, const char *Name) {
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -109,8 +109,12 @@
   /// Return the alignment of the memory that is being allocated by the
   /// instruction.
+  MaybeAlign getAlign() const {
+    return decodeMaybeAlign(getSubclassDataFromInstruction() & 31);
+  }
+  // FIXME: Remove this once transition to Align is over.
   unsigned getAlignment() const {
-    if (const auto MA = decodeMaybeAlign(getSubclassDataFromInstruction() & 31))
+    if (const auto MA = getAlign())
       return MA->value();
     return 0;
   }
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -1622,7 +1622,7 @@
   bool UseSizedLibcall = canUseSizedAtomicCall(Size, Align, DL);
   Type *SizedIntTy = Type::getIntNTy(Ctx, Size * 8);
 
-  unsigned AllocaAlignment = DL.getPrefTypeAlignment(SizedIntTy);
+  const llvm::Align AllocaAlignment(DL.getPrefTypeAlignment(SizedIntTy));
 
   // TODO: the "order" argument type is "int", not int32. So
   // getInt32Ty may be wrong if the arch uses e.g. 16-bit ints.
@@ -1712,14 +1712,15 @@
   // 'expected' argument, if present.
   if (CASExpected) {
     AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
-    AllocaCASExpected->setAlignment(MaybeAlign(AllocaAlignment));
+    AllocaCASExpected->setAlignment(AllocaAlignment);
     unsigned AllocaAS = AllocaCASExpected->getType()->getPointerAddressSpace();
 
     AllocaCASExpected_i8 = Builder.CreateBitCast(
         AllocaCASExpected, Type::getInt8PtrTy(Ctx, AllocaAS));
     Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
-    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
+    Builder.CreateAlignedStore(CASExpected, AllocaCASExpected,
+                               AllocaAlignment.value());
     Args.push_back(AllocaCASExpected_i8);
   }
 
@@ -1731,11 +1732,12 @@
     Args.push_back(IntValue);
   } else {
     AllocaValue = AllocaBuilder.CreateAlloca(ValueOperand->getType());
-    AllocaValue->setAlignment(MaybeAlign(AllocaAlignment));
+    AllocaValue->setAlignment(AllocaAlignment);
     AllocaValue_i8 =
         Builder.CreateBitCast(AllocaValue, Type::getInt8PtrTy(Ctx));
     Builder.CreateLifetimeStart(AllocaValue_i8, SizeVal64);
-    Builder.CreateAlignedStore(ValueOperand, AllocaValue, AllocaAlignment);
+    Builder.CreateAlignedStore(ValueOperand, AllocaValue,
+                               AllocaAlignment.value());
     Args.push_back(AllocaValue_i8);
   }
 }
@@ -1743,7 +1745,7 @@
   // 'ret' argument.
   if (!CASExpected && HasResult && !UseSizedLibcall) {
     AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
-    AllocaResult->setAlignment(MaybeAlign(AllocaAlignment));
+    AllocaResult->setAlignment(AllocaAlignment);
     unsigned AllocaAS = AllocaResult->getType()->getPointerAddressSpace();
     AllocaResult_i8 = Builder.CreateBitCast(
         AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
diff --git a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
--- a/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
+++ b/llvm/lib/CodeGen/InterleavedLoadCombinePass.cpp
@@ -1220,7 +1220,7 @@
                                             "interleaved.wide.ptrcast");
 
   // Create the wide load and update the MemorySSA.
-  auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlignment(),
+  auto LI = Builder.CreateAlignedLoad(ILTy, CI, InsertionPoint->getAlign(),
                                       "interleaved.wide.load");
   auto MSSAU = MemorySSAUpdater(&MSSA);
   MemoryUse *MSSALoad = cast<MemoryUse>(MSSAU.createMemoryAccessBefore(
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -46,7 +46,7 @@
   Value *OffsetPtr =
       B.CreateGEP(Int8Ty, CI->getArgOperand(0), CI->getArgOperand(1));
   Value *OffsetPtrI32 = B.CreateBitCast(OffsetPtr, Int32PtrTy);
-  Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtrI32, 4);
+  Value *OffsetI32 = B.CreateAlignedLoad(Int32Ty, OffsetPtrI32, Align(4));
 
   Value *ResultPtr = B.CreateGEP(Int8Ty, CI->getArgOperand(0), OffsetI32);
diff --git a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
--- a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
@@ -130,7 +130,7 @@
   Value *Mask = CI->getArgOperand(2);
   Value *Src0 = CI->getArgOperand(3);
 
-  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
+  const Align AlignVal = cast<ConstantInt>(Alignment)->getAlignValue();
   VectorType *VecType = cast<VectorType>(CI->getType());
 
   Type *EltTy = VecType->getElementType();
@@ -151,7 +151,8 @@
   }
 
   // Adjust alignment for the scalar instruction.
-  AlignVal = MinAlign(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
+  const Align AdjustedAlignVal =
+      commonAlignment(AlignVal, EltTy->getPrimitiveSizeInBits() / 8);
   // Bitcast %addr from i8* to EltTy*
   Type *NewPtrType =
       EltTy->getPointerTo(Ptr->getType()->getPointerAddressSpace());
@@ -166,7 +167,7 @@
       if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
         continue;
       Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
-      LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
+      LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
       VResult = Builder.CreateInsertElement(VResult, Load, Idx);
     }
     CI->replaceAllUsesWith(VResult);
@@ -210,7 +211,7 @@
     Builder.SetInsertPoint(InsertPt);
 
     Value *Gep = Builder.CreateConstInBoundsGEP1_32(EltTy, FirstEltPtr, Idx);
-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AlignVal);
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Gep, AdjustedAlignVal);
     Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
 
     // Create "else" block, fill it in the next iteration
@@ -414,8 +415,8 @@
     if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
       continue;
     Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
-    LoadInst *Load =
-        Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
+    LoadInst *Load = Builder.CreateAlignedLoad(
+        EltTy, Ptr, MaybeAlign(AlignVal), "Load" + Twine(Idx));
     VResult =
         Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
   }
@@ -459,8 +460,8 @@
     Builder.SetInsertPoint(InsertPt);
 
     Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
-    LoadInst *Load =
-        Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, MaybeAlign(AlignVal),
+                                               "Load" + Twine(Idx));
     Value *NewVResult =
         Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
 
@@ -624,8 +625,8 @@
     if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
       continue;
     Value *NewPtr = Builder.CreateConstInBoundsGEP1_32(EltTy, Ptr, MemIndex);
-    LoadInst *Load =
-        Builder.CreateAlignedLoad(EltTy, NewPtr, 1, "Load" + Twine(Idx));
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, NewPtr, Align(1),
+                                               "Load" + Twine(Idx));
     VResult =
         Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
     ++MemIndex;
@@ -670,7 +671,7 @@
                                                      "cond.load");
     Builder.SetInsertPoint(InsertPt);
 
-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, 1);
+    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, Align(1));
     Value *NewVResult = Builder.CreateInsertElement(VResult, Load, Idx);
 
     // Move the pointer if there are more blocks to come.
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -2308,7 +2308,7 @@
     Type *VT = VectorType::get(EltTy, NumSrcElts);
     Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
                                           PointerType::getUnqual(VT));
-    Value *Load = Builder.CreateAlignedLoad(VT, Op, 1);
+    Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1));
     if (NumSrcElts == 2)
       Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
                                         { 0, 1, 0, 1 });
@@ -3054,7 +3054,8 @@
     // Convert the type of the pointer to a pointer to the stored type.
     Value *BC = Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast");
-    LoadInst *LI = Builder.CreateAlignedLoad(VTy, BC, VTy->getBitWidth() / 8);
+    LoadInst *LI =
+        Builder.CreateAlignedLoad(VTy, BC, Align(VTy->getBitWidth() / 8));
     LI->setMetadata(M->getMDKindID("nontemporal"), Node);
     Rep = LI;
   } else if (IsX86 && (Name.startswith("fma.vfmadd.") ||
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -160,7 +160,7 @@
     ArgPtr = Builder.CreateBitCast(ArgPtr, AdjustedArgTy->getPointerTo(AS),
                                    ArgPtr->getName() + ".cast");
     LoadInst *Load =
-        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign.value());
+        Builder.CreateAlignedLoad(AdjustedArgTy, ArgPtr, AdjustedAlign);
     Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));
 
     MDBuilder MDB(Ctx);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -251,10 +251,10 @@
   // 32-bit and extract sequence is already present, and it is probably easier
   // to CSE this. The loads should be mergable later anyway.
   Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
-  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, 4);
+  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));
 
   Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
-  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, 4);
+  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
 
   MDNode *MD = MDNode::get(Mod->getContext(), None);
   LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
diff --git a/llvm/lib/Target/ARM/ARMParallelDSP.cpp b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
--- a/llvm/lib/Target/ARM/ARMParallelDSP.cpp
+++ b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
@@ -772,8 +772,7 @@
   const unsigned AddrSpace = DomLoad->getPointerAddressSpace();
   Value *VecPtr = IRB.CreateBitCast(Base->getPointerOperand(),
                                     LoadTy->getPointerTo(AddrSpace));
-  LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr,
-                                             Base->getAlignment());
+  LoadInst *WideLoad = IRB.CreateAlignedLoad(LoadTy, VecPtr, Base->getAlign());
 
   // Make sure everything is in the correct order in the basic block.
   MoveBefore(Base->getPointerOperand(), VecPtr);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -27474,7 +27474,7 @@
   // Finally we can emit the atomic load.
   LoadInst *Loaded =
       Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
-                                AI->getType()->getPrimitiveSizeInBits());
+                                Align(AI->getType()->getPrimitiveSizeInBits()));
   Loaded->setAtomic(Order, SSID);
   AI->replaceAllUsesWith(Loaded);
   AI->eraseFromParent();
diff --git a/llvm/lib/Target/X86/X86InterleavedAccess.cpp b/llvm/lib/Target/X86/X86InterleavedAccess.cpp
--- a/llvm/lib/Target/X86/X86InterleavedAccess.cpp
+++ b/llvm/lib/Target/X86/X86InterleavedAccess.cpp
@@ -216,7 +216,7 @@
     Value *NewBasePtr =
         Builder.CreateGEP(VecBaseTy, VecBasePtr, Builder.getInt32(i));
     Instruction *NewLoad =
-        Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlignment());
+        Builder.CreateAlignedLoad(VecBaseTy, NewBasePtr, LI->getAlign());
     DecomposedVectors.push_back(NewLoad);
   }
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1056,7 +1056,8 @@
 // * Narrow width by halfs excluding zero/undef lanes
 Value *InstCombiner::simplifyMaskedLoad(IntrinsicInst &II) {
   Value *LoadPtr = II.getArgOperand(0);
-  unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
+  const Align Alignment =
+      cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
 
   // If the mask is all ones or undefs, this is a plain vector load of the 1st
   // argument.
@@ -1066,9 +1067,9 @@
   // If we can unconditionally load from this address, replace with a
   // load/select idiom. TODO: use DT for context sensitive query
-  if (isDereferenceableAndAlignedPointer(
-          LoadPtr, II.getType(), MaybeAlign(Alignment),
-          II.getModule()->getDataLayout(), &II, nullptr)) {
+  if (isDereferenceableAndAlignedPointer(LoadPtr, II.getType(), Alignment,
+                                         II.getModule()->getDataLayout(), &II,
+                                         nullptr)) {
     Value *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
                                           "unmaskedload");
     return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
@@ -1459,7 +1460,7 @@
   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
                                           PointerType::get(II.getType(), 0));
-  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
+  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
 }
 
 // Returns true iff the 2 intrinsics have the same operands, limiting the
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -462,12 +462,11 @@
         NewPtr->getType()->getPointerAddressSpace() == AS))
     NewPtr = Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));
 
-  unsigned Align = LI.getAlignment();
-  if (!Align)
   // If old load did not have an explicit alignment specified,
   // manually preserve the implied (ABI) alignment of the load.
   // Else we may inadvertently incorrectly over-promise alignment.
-    Align = getDataLayout().getABITypeAlignment(LI.getType());
+  const llvm::Align Align =
+      getDataLayout().getValueOrABITypeAlignment(LI.getAlign(), LI.getType());
 
   LoadInst *NewLoad = Builder.CreateAlignedLoad(
       NewTy, NewPtr, Align, LI.isVolatile(), LI.getName() + Suffix);
@@ -674,9 +673,7 @@
   if (SL->hasPadding())
     return nullptr;
 
-  auto Align = LI.getAlignment();
-  if (!Align)
-    Align = DL.getABITypeAlignment(ST);
+  const auto Align = DL.getValueOrABITypeAlignment(LI.getAlign(), ST);
 
   auto *Addr = LI.getPointerOperand();
   auto *IdxType = Type::getInt32Ty(T->getContext());
@@ -690,9 +687,9 @@
     };
     auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                              Name + ".elt");
-    auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
-    auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
-                                           EltAlign, Name + ".unpack");
+    auto *L = IC.Builder.CreateAlignedLoad(
+        ST->getElementType(i), Ptr,
+        commonAlignment(Align, SL->getElementOffset(i)), Name + ".unpack");
     // Propagate AA metadata. It'll still be valid on the narrowed load.
     AAMDNodes AAMD;
     LI.getAAMetadata(AAMD);
@@ -725,9 +722,7 @@
   const DataLayout &DL = IC.getDataLayout();
   auto EltSize = DL.getTypeAllocSize(ET);
 
-  auto Align = LI.getAlignment();
-  if (!Align)
-    Align = DL.getABITypeAlignment(T);
+  const auto Align = DL.getValueOrABITypeAlignment(LI.getAlign(), T);
 
   auto *Addr = LI.getPointerOperand();
   auto *IdxType = Type::getInt64Ty(T->getContext());
@@ -742,8 +737,9 @@
     };
     auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                              Name + ".elt");
-    auto *L = IC.Builder.CreateAlignedLoad(
-        AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
+    auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
+                                           commonAlignment(Align, Offset),
+                                           Name + ".unpack");
     AAMDNodes AAMD;
     LI.getAAMetadata(AAMD);
     L->setAAMetadata(AAMD);
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1194,7 +1194,7 @@
     }
   }
 
-  uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
+  const MaybeAlign ShadowAlign(Align * DFS.ShadowWidth / 8);
   SmallVector Objs;
   GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
   bool AllConstants = true;
@@ -1216,7 +1216,7 @@
     return DFS.ZeroShadow;
   case 1: {
     LoadInst *LI = new LoadInst(DFS.ShadowTy, ShadowAddr, "", Pos);
-    LI->setAlignment(MaybeAlign(ShadowAlign));
+    LI->setAlignment(ShadowAlign);
     return LI;
   }
   case 2: {
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1643,8 +1643,8 @@
         // ParamTLS overflow.
         *ShadowPtr = getCleanShadow(V);
       } else {
-        *ShadowPtr = EntryIRB.CreateAlignedLoad(
-            getShadowTy(&FArg), Base, kShadowTLSAlignment.value());
+        *ShadowPtr = EntryIRB.CreateAlignedLoad(getShadowTy(&FArg), Base,
+                                                kShadowTLSAlignment);
       }
     }
     LLVM_DEBUG(dbgs()
@@ -1783,8 +1783,8 @@
     if (PropagateShadow) {
       std::tie(ShadowPtr, OriginPtr) =
           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
-      setShadow(&I, IRB.CreateAlignedLoad(ShadowTy, ShadowPtr,
-                                          Alignment.value(), "_msld"));
+      setShadow(&I,
+                IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
     } else {
       setShadow(&I, getCleanShadow(&I));
     }
@@ -1798,8 +1798,8 @@
     if (MS.TrackOrigins) {
       if (PropagateShadow) {
         const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
-        setOrigin(&I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr,
-                                            OriginAlignment.value()));
+        setOrigin(
+            &I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr, OriginAlignment));
       } else {
         setOrigin(&I, getCleanOrigin());
       }
@@ -2481,8 +2481,8 @@
       const Align Alignment = Align::None();
       std::tie(ShadowPtr, OriginPtr) =
           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
-      setShadow(&I, IRB.CreateAlignedLoad(ShadowTy, ShadowPtr,
-                                          Alignment.value(), "_msld"));
+      setShadow(&I,
+                IRB.CreateAlignedLoad(ShadowTy, ShadowPtr, Alignment, "_msld"));
     } else {
       setShadow(&I, getCleanShadow(&I));
     }
@@ -2893,8 +2893,7 @@
     if (ClCheckAccessAddress)
       insertShadowCheck(Addr, &I);
 
-    Value *Shadow =
-        IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment.value(), "_ldmxcsr");
+    Value *Shadow = IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment, "_ldmxcsr");
     Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
                                     : getCleanOrigin();
     insertShadowCheck(Shadow, Origin, &I);
@@ -3381,7 +3380,7 @@
     IRBuilder<> IRBAfter(&*NextInsn);
     Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
         getShadowTy(&I), getShadowPtrForRetval(&I, IRBAfter),
-        kShadowTLSAlignment.value(), "_msret");
+        kShadowTLSAlignment, "_msret");
     setShadow(&I, RetvalShadow);
     if (MS.TrackOrigins)
       setOrigin(&I, IRBAfter.CreateLoad(MS.OriginTy,
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2460,7 +2460,7 @@
     assert(EndIndex > BeginIndex && "Empty vector!");
 
     Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                     NewAI.getAlignment(), "load");
+                                     NewAI.getAlign(), "load");
     return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
   }
 
@@ -2468,7 +2468,7 @@
     assert(IntTy && "We cannot insert an integer to the alloca");
     assert(!LI.isVolatile());
     Value *V = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                     NewAI.getAlignment(), "load");
+                                     NewAI.getAlign(), "load");
     V = convertValue(DL, IRB, V, IntTy);
     assert(NewBeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
     uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
@@ -2513,8 +2513,8 @@
                 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
                  TargetTy->isIntegerTy()))) {
       LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                              NewAI.getAlignment(),
-                                              LI.isVolatile(), LI.getName());
+                                              NewAI.getAlign(), LI.isVolatile(),
+                                              LI.getName());
       if (AATags)
         NewLI->setAAMetadata(AATags);
       if (LI.isVolatile())
@@ -2609,7 +2609,7 @@
       // Mix in the existing elements.
       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                         NewAI.getAlignment(), "load");
+                                         NewAI.getAlign(), "load");
       V = insertVector(IRB, Old, V, BeginIndex, "vec");
     }
     StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
@@ -2626,7 +2626,7 @@
     assert(!SI.isVolatile());
     if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                         NewAI.getAlignment(), "oldload");
+                                         NewAI.getAlign(), "oldload");
       Old = convertValue(DL, IRB, Old, IntTy);
       assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
       uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
@@ -2829,7 +2829,7 @@
         Splat = getVectorSplat(Splat, NumElements);
 
       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                         NewAI.getAlignment(), "oldload");
+                                         NewAI.getAlign(), "oldload");
       V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
     } else if (IntTy) {
       // If this is a memset on an alloca where we can widen stores, insert the
@@ -2842,7 +2842,7 @@
     if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
                   EndOffset != NewAllocaBeginOffset)) {
       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                         NewAI.getAlignment(), "oldload");
+                                         NewAI.getAlign(), "oldload");
       Old = convertValue(DL, IRB, Old, IntTy);
       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
       V = insertInteger(DL, IRB, Old, V, Offset, "insert");
@@ -3028,11 +3028,11 @@
     Value *Src;
     if (VecTy && !IsWholeAlloca && !IsDest) {
       Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                  NewAI.getAlignment(), "load");
+                                  NewAI.getAlign(), "load");
       Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
     } else if (IntTy && !IsWholeAlloca && !IsDest) {
       Src = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                  NewAI.getAlignment(), "load");
+                                  NewAI.getAlign(), "load");
       Src = convertValue(DL, IRB, Src, IntTy);
       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
       Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
@@ -3046,11 +3046,11 @@
 
     if (VecTy && !IsWholeAlloca && IsDest) {
       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                         NewAI.getAlignment(), "oldload");
+                                         NewAI.getAlign(), "oldload");
       Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
     } else if (IntTy && !IsWholeAlloca && IsDest) {
       Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI,
-                                         NewAI.getAlignment(), "oldload");
+                                         NewAI.getAlign(), "oldload");
       Old = convertValue(DL, IRB, Old, IntTy);
       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
       Src = insertInteger(DL, IRB, Old, Src, Offset, "insert");
@@ -3357,7 +3357,7 @@
     Value *GEP = IRB.CreateInBoundsGEP(BaseTy, Ptr, GEPIndices, Name + ".gep");
     LoadInst *Load =
-        IRB.CreateAlignedLoad(Ty, GEP, Alignment.value(), Name + ".load");
+        IRB.CreateAlignedLoad(Ty, GEP, Alignment, Name + ".load");
     if (AATags)
       Load->setAAMetadata(AATags);
     Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
@@ -3860,7 +3860,7 @@
         getAdjustedPtr(IRB, DL, BasePtr,
                        APInt(DL.getIndexSizeInBits(AS), PartOffset),
                        PartPtrTy, BasePtr->getName() + "."),
-        getAdjustedAlignment(LI, PartOffset, DL).value(),
+        getAdjustedAlignment(LI, PartOffset, DL),
        /*IsVolatile*/ false, LI->getName());
     PLoad->copyMetadata(*LI, {LLVMContext::MD_mem_parallel_loop_access,
                               LLVMContext::MD_access_group});
@@ -4003,7 +4003,7 @@
           getAdjustedPtr(IRB, DL, LoadBasePtr,
                          APInt(DL.getIndexSizeInBits(AS), PartOffset),
                          LoadPartPtrTy, LoadBasePtr->getName() + "."),
-          getAdjustedAlignment(LI, PartOffset, DL).value(),
+          getAdjustedAlignment(LI, PartOffset, DL),
          /*IsVolatile*/ false, LI->getName());
     }
diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -802,7 +802,7 @@
   for (unsigned I = 0; I < NumElems; ++I)
     Res[I] = Builder.CreateAlignedLoad(Layout.VecTy->getElementType(), Ptr[I],
-                                       Layout.getElemAlign(I),
+                                       Align(Layout.getElemAlign(I)),
                                        LI.getName() + ".i" + Twine(I));
   gather(&LI, Res);
   return true;
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -1194,7 +1194,8 @@
     Value *Bitcast =
         Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
-    LoadInst *LI = Builder.CreateAlignedLoad(VecTy, Bitcast, Alignment);
+    LoadInst *LI =
+        Builder.CreateAlignedLoad(VecTy, Bitcast, MaybeAlign(Alignment));
     propagateMetadata(LI, Chain);
 
     if (VecLoadTy) {
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2268,7 +2268,7 @@
       }
       else
        NewLoad = Builder.CreateAlignedLoad(VecTy, AddrParts[Part],
-                                           Group->getAlignment(), "wide.vec");
+                                           Group->getAlign(), "wide.vec");
       Group->addMetadata(NewLoad);
       NewLoads.push_back(NewLoad);
     }
@@ -2478,8 +2478,8 @@
                                          VecPtr, Alignment, BlockInMaskParts[Part],
                                          UndefValue::get(DataTy), "wide.masked.load");
       else
-        NewLI = Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment.value(),
-                                          "wide.load");
+        NewLI =
+            Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment, "wide.load");
 
       // Add metadata to the load, but setVectorValue to the reverse shuffle.
       addMetadata(NewLI, LI);
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -4150,7 +4150,7 @@
       Builder.SetInsertPoint(LI);
       PointerType *PtrTy = PointerType::get(VecTy, LI->getPointerAddressSpace());
       Value *Ptr = Builder.CreateBitCast(LI->getOperand(0), PtrTy);
-      LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlignment());
+      LoadInst *V = Builder.CreateAlignedLoad(VecTy, Ptr, LI->getAlign());
       Value *NewV = propagateMetadata(V, E->Scalars);
       if (!E->ReorderIndices.empty()) {
         OrdersType Mask;