diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -64,6 +64,12 @@
 provided by the attribute is interfaced via the API provided by the
 ``VFDatabase`` class.
 
+* ``dereferenceable`` attributes and metadata on pointers no longer imply
+  anything about the alignment of the pointer in question. Previously, some
+  optimizations would make assumptions based on the type of the pointer.
+  This behavior was undocumented. To preserve optimizations, frontends may
+  need to be updated to generate appropriate ``align`` attributes and metadata.
+
 Changes to building LLVM
 ------------------------
 
diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -664,7 +664,7 @@
   ///
   /// Returns an alignment which is either specified explicitly, e.g. via
   /// align attribute of a function argument, or guaranteed by DataLayout.
-  MaybeAlign getPointerAlignment(const DataLayout &DL) const;
+  Align getPointerAlignment(const DataLayout &DL) const;
 
   /// Translate PHI node to its predecessor from the given basic block.
   ///
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -27,24 +27,12 @@
 
 using namespace llvm;
 
-static MaybeAlign getBaseAlign(const Value *Base, const DataLayout &DL) {
-  if (const MaybeAlign PA = Base->getPointerAlignment(DL))
-    return *PA;
-  Type *const Ty = Base->getType()->getPointerElementType();
-  if (!Ty->isSized())
-    return None;
-  return Align(DL.getABITypeAlignment(Ty));
-}
-
 static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                       const DataLayout &DL) {
-  if (MaybeAlign BA = getBaseAlign(Base, DL)) {
-    const APInt APBaseAlign(Offset.getBitWidth(), BA->value());
-    const APInt APAlign(Offset.getBitWidth(), Alignment.value());
-    assert(APAlign.isPowerOf2() && "must be a power of 2!");
-    return APBaseAlign.uge(APAlign) && !(Offset & (APAlign - 1));
-  }
-  return false;
+  Align BA = Base->getPointerAlignment(DL);
+  const APInt APAlign(Offset.getBitWidth(), Alignment.value());
+  assert(APAlign.isPowerOf2() && "must be a power of 2!");
+  return BA >= Alignment && !(Offset & (APAlign - 1));
 }
 
 /// Test if V is always a pointer to allocated and suitably aligned memory for
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -2008,9 +2008,8 @@
 
   // Aligned pointers have trailing zeros - refine Known.Zero set
   if (isa<PointerType>(V->getType())) {
-    const MaybeAlign Align = V->getPointerAlignment(Q.DL);
-    if (Align)
-      Known.Zero.setLowBits(countTrailingZeros(Align->value()));
+    Align Alignment = V->getPointerAlignment(Q.DL);
+    Known.Zero.setLowBits(countTrailingZeros(Alignment.value()));
   }
 
   // computeKnownBitsFromAssume strictly refines Known.
diff --git a/llvm/lib/CodeGen/ExpandMemCmp.cpp b/llvm/lib/CodeGen/ExpandMemCmp.cpp
--- a/llvm/lib/CodeGen/ExpandMemCmp.cpp
+++ b/llvm/lib/CodeGen/ExpandMemCmp.cpp
@@ -273,8 +273,8 @@
   // Get the memory source at offset `OffsetBytes`.
   Value *LhsSource = CI->getArgOperand(0);
   Value *RhsSource = CI->getArgOperand(1);
-  Align LhsAlign = LhsSource->getPointerAlignment(DL).valueOrOne();
-  Align RhsAlign = RhsSource->getPointerAlignment(DL).valueOrOne();
+  Align LhsAlign = LhsSource->getPointerAlignment(DL);
+  Align RhsAlign = RhsSource->getPointerAlignment(DL);
   if (OffsetBytes > 0) {
     auto *ByteType = Type::getInt8Ty(CI->getContext());
     LhsSource = Builder.CreateConstGEP1_64(
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -1210,7 +1210,8 @@
     MaybeAlign GVAlign;
 
     if (Module *TheModule = GV->getParent()) {
-      GVAlign = GV->getPointerAlignment(TheModule->getDataLayout());
+      const DataLayout &DL = TheModule->getDataLayout();
+      GVAlign = GV->getPointerAlignment(DL);
 
       // If the function alignment is not specified then assume that it
       // is 4.
@@ -1221,7 +1222,7 @@
       // increased code size (see https://reviews.llvm.org/D55115)
      // FIXME: This code should be deleted once existing targets have
      // appropriate defaults
-      if (!GVAlign && isa<Function>(GV))
+      if (isa<Function>(GV) && !DL.getFunctionPtrAlign())
        GVAlign = Align(4);
     } else if (isa<Function>(GV)) {
      // Without a datalayout we have to assume the worst case: that the
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -738,16 +738,16 @@
   return DerefBytes;
 }
 
-MaybeAlign Value::getPointerAlignment(const DataLayout &DL) const {
+Align Value::getPointerAlignment(const DataLayout &DL) const {
   assert(getType()->isPointerTy() && "must be pointer");
   if (auto *GO = dyn_cast<GlobalObject>(this)) {
     if (isa<Function>(GO)) {
-      const MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign();
+      Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
       switch (DL.getFunctionPtrAlignType()) {
       case DataLayout::FunctionPtrAlignType::Independent:
         return FunctionPtrAlign;
       case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
-        return std::max(FunctionPtrAlign, MaybeAlign(GO->getAlignment()));
+        return std::max(FunctionPtrAlign, GO->getAlign().valueOrOne());
       }
       llvm_unreachable("Unhandled FunctionPtrAlignType");
     }
@@ -760,13 +760,13 @@
           // it the preferred alignment. Otherwise, we have to assume that it
           // may only have the minimum ABI alignment.
           if (GVar->isStrongDefinitionForLinker())
-            return MaybeAlign(DL.getPreferredAlignment(GVar));
+            return Align(DL.getPreferredAlignment(GVar));
           else
             return DL.getABITypeAlign(ObjectType);
         }
       }
     }
-    return Alignment;
+    return Alignment.valueOrOne();
   } else if (const Argument *A = dyn_cast<Argument>(this)) {
     const MaybeAlign Alignment = A->getParamAlign();
     if (!Alignment && A->hasStructRetAttr()) {
@@ -775,25 +775,18 @@
       if (EltTy->isSized())
         return DL.getABITypeAlign(EltTy);
     }
-    return Alignment;
+    return Alignment.valueOrOne();
   } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
-    const MaybeAlign Alignment = AI->getAlign();
-    if (!Alignment) {
-      Type *AllocatedType = AI->getAllocatedType();
-      if (AllocatedType->isSized())
-        return MaybeAlign(DL.getPrefTypeAlignment(AllocatedType));
-    }
-    return Alignment;
+    return AI->getAlign();
   } else if (const auto *Call = dyn_cast<CallBase>(this)) {
-    const MaybeAlign Alignment = Call->getRetAlign();
+    MaybeAlign Alignment = Call->getRetAlign();
     if (!Alignment && Call->getCalledFunction())
-      return MaybeAlign(
-          Call->getCalledFunction()->getAttributes().getRetAlignment());
-    return Alignment;
+      Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
+    return Alignment.valueOrOne();
   } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
     if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
       ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
-      return MaybeAlign(CI->getLimitedValue());
+      return Align(CI->getLimitedValue());
     }
   } else if (auto *CstPtr = dyn_cast<Constant>(this)) {
     if (auto *CstInt = dyn_cast_or_null<ConstantInt>(ConstantExpr::getPtrToInt(
@@ -807,7 +800,7 @@
                        : Value::MaximumAlignment);
     }
   }
-  return llvm::None;
+  return Align(1);
 }
 
 const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2485,8 +2485,8 @@
 def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
   if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
     const DataLayout &DL = MF->getDataLayout();
-    MaybeAlign Align = G->getGlobal()->getPointerAlignment(DL);
-    return Align && *Align >= 4 && G->getOffset() % 4 == 0;
+    Align Align = G->getGlobal()->getPointerAlignment(DL);
+    return Align >= 4 && G->getOffset() % 4 == 0;
   }
   if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
     return C->getAlign() >= 4 && C->getOffset() % 4 == 0;
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -3523,17 +3523,6 @@
 
 // ------------------------ Align Argument Attribute ------------------------
 
-/// \p Ptr is accessed so we can get alignment information if the ABI requires
-/// the element type to be aligned.
-static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr,
-                                                    const DataLayout &DL) {
-  MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL);
-  Type *ElementTy = Ptr->getType()->getPointerElementType();
-  if (ElementTy->isSized())
-    KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy));
-  return KnownAlignment;
-}
-
 static unsigned getKnownAlignForUse(Attributor &A,
                                     AbstractAttribute &QueryingAA,
                                     Value &AssociatedValue, const Use *U,
@@ -3569,19 +3558,11 @@
   const DataLayout &DL = A.getDataLayout();
   const Value *UseV = U->get();
   if (auto *SI = dyn_cast<StoreInst>(I)) {
-    if (SI->getPointerOperand() == UseV) {
-      if (unsigned SIAlign = SI->getAlignment())
-        MA = MaybeAlign(SIAlign);
-      else
-        MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
-    }
+    if (SI->getPointerOperand() == UseV)
+      MA = SI->getAlign();
   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
-    if (LI->getPointerOperand() == UseV) {
-      if (unsigned LIAlign = LI->getAlignment())
-        MA = MaybeAlign(LIAlign);
-      else
-        MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
-    }
+    if (LI->getPointerOperand() == UseV)
+      MA = LI->getAlign();
   }
 
   if (!MA.hasValue() || MA <= 1)
@@ -3622,8 +3603,7 @@
     // their uses and int2ptr is not handled. It is not a correctness
     // problem though!
     if (!V.getType()->getPointerElementType()->isFunctionTy())
-      takeKnownMaximum(
-          V.getPointerAlignment(A.getDataLayout()).valueOrOne().value());
+      takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
 
     if (getIRPosition().isFnInterfaceKind() &&
         (!getAnchorScope() ||
@@ -3664,9 +3644,9 @@
 
     ChangeStatus Changed = AAAlign::manifest(A);
 
-    MaybeAlign InheritAlign =
+    Align InheritAlign =
         getAssociatedValue().getPointerAlignment(A.getDataLayout());
-    if (InheritAlign.valueOrOne() >= getAssumedAlign())
+    if (InheritAlign >= getAssumedAlign())
       return LoadStoreChanged;
     return Changed | LoadStoreChanged;
   }
@@ -3717,8 +3697,8 @@
     const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
     if (!Stripped && this == &AA) {
       // Use only IR information if we did not strip anything.
-      const MaybeAlign PA = V.getPointerAlignment(DL);
-      T.takeKnownMaximum(PA ? PA->value() : 0);
+      Align PA = V.getPointerAlignment(DL);
+      T.takeKnownMaximum(PA.value());
       T.indicatePessimisticFixpoint();
     } else {
       // Use abstract attribute information.
@@ -3786,9 +3766,9 @@
     if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
       return ChangeStatus::UNCHANGED;
     ChangeStatus Changed = AAAlignImpl::manifest(A);
-    MaybeAlign InheritAlign =
+    Align InheritAlign =
         getAssociatedValue().getPointerAlignment(A.getDataLayout());
-    if (InheritAlign.valueOrOne() >= getAssumedAlign())
+    if (InheritAlign >= getAssumedAlign())
       Changed = ChangeStatus::UNCHANGED;
     return Changed;
   }
diff --git a/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll b/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
--- a/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
+++ b/llvm/test/Analysis/ValueTracking/memory-dereferenceable.ll
@@ -51,10 +51,10 @@
     %sret_gep_outside = getelementptr %struct.A, %struct.A* %result, i64 0, i32 1, i64 7
     load i8, i8* %sret_gep_outside
 
-; CHECK: %dparam{{.*}}(aligned)
+; CHECK: %dparam{{.*}}(unaligned)
     %load3 = load i32, i32 addrspace(1)* %dparam
-; CHECK: %relocate{{.*}}(aligned)
+; CHECK: %relocate{{.*}}(unaligned)
     %tok = tail call token (i64, i32, i1 ()*, i32, i32, ...)
           @llvm.experimental.gc.statepoint.p0f_i1f(i64 0, i32 0, i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 0, i32 addrspace(1)* %dparam)
     %relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %tok, i32 7, i32 7)
     %load4 = load i32, i32 addrspace(1)* %relocate
@@ -70,7 +70,7 @@
     %load6 = load i32, i32* %nd_load
 
 ; Load from a dereferenceable load
-; CHECK: %d4_load{{.*}}(aligned)
+; CHECK: %d4_load{{.*}}(unaligned)
     %d4_load = load i32*, i32** @globali32ptr, !dereferenceable !0
     %load7 = load i32, i32* %d4_load
 
@@ -85,7 +85,7 @@
     %load9 = load i32, i32* %d_or_null_load
 
 ; Load from a non-null pointer with dereferenceable_or_null
-; CHECK: %d_or_null_non_null_load{{.*}}(aligned)
+; CHECK: %d_or_null_non_null_load{{.*}}(unaligned)
     %d_or_null_non_null_load = load i32*, i32** @globali32ptr, !nonnull !2, !dereferenceable_or_null !0
     %load10 = load i32, i32* %d_or_null_non_null_load
 
diff --git a/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll b/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll
--- a/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll
+++ b/llvm/test/Transforms/GVN/PRE/load-pre-licm.ll
@@ -192,7 +192,7 @@
   br label %header
 }
 
-define i32 @test6b(i1 %cnd, i32* dereferenceable(8) %p) {
+define i32 @test6b(i1 %cnd, i32* dereferenceable(8) align 4 %p) {
 entry:
 ; CHECK-LABEL: @test6b
 ; CHECK: load i32, i32* %p
diff --git a/llvm/test/Transforms/GVN/PRE/pre-load.ll b/llvm/test/Transforms/GVN/PRE/pre-load.ll
--- a/llvm/test/Transforms/GVN/PRE/pre-load.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-load.ll
@@ -507,7 +507,7 @@
 ; dereferenceable can be loaded from speculatively without a risk of trapping.
 ; Since it is OK to speculate, PRE is allowed.
 
-define i32 @test15(i32* noalias nocapture readonly dereferenceable(8) %x, i32* noalias nocapture %r, i32 %a) {
+define i32 @test15(i32* noalias nocapture readonly dereferenceable(8) align 4 %x, i32* noalias nocapture %r, i32 %a) {
 ; CHECK-LABEL: @test15
 ; CHECK: entry:
 
@@ -548,7 +548,7 @@
 ; dereferenceable can be loaded from speculatively without a risk of trapping.
 ; Since it is OK to speculate, PRE is allowed.
-define i32 @test16(i32* noalias nocapture readonly dereferenceable(8) %x, i32* noalias nocapture %r, i32 %a) {
+define i32 @test16(i32* noalias nocapture readonly dereferenceable(8) align 4 %x, i32* noalias nocapture %r, i32 %a) {
 ; CHECK-LABEL: @test16(
 ; CHECK: entry:
 
diff --git a/llvm/test/Transforms/InstCombine/call-guard.ll b/llvm/test/Transforms/InstCombine/call-guard.ll
--- a/llvm/test/Transforms/InstCombine/call-guard.ll
+++ b/llvm/test/Transforms/InstCombine/call-guard.ll
@@ -67,7 +67,7 @@
   ret void
 }
 
-define void @deref_load(i32 %V1, i32* dereferenceable(4) %P) {
+define void @deref_load(i32 %V1, i32* dereferenceable(4) align 4 %P) {
 ; CHECK-LABEL: @deref_load
 ; CHECK-NEXT:    %V2 = load i32, i32* %P, align 4
 ; CHECK-NEXT:    %1 = and i32 %V2, %V1
diff --git a/llvm/test/Transforms/InstCombine/masked_intrinsics.ll b/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
--- a/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/masked_intrinsics.ll
@@ -83,7 +83,7 @@
   ret <2 x double> %res
 }
 
-define <2 x double> @load_speculative(<2 x double>* dereferenceable(16) %ptr,
+define <2 x double> @load_speculative(<2 x double>* dereferenceable(16) align 4 %ptr,
 ; CHECK-LABEL: @load_speculative(
 ; CHECK-NEXT:    [[PTV1:%.*]] = insertelement <2 x double> undef, double [[PT:%.*]], i64 0
 ; CHECK-NEXT:    [[PTV2:%.*]] = shufflevector <2 x double> [[PTV1]], <2 x double> undef, <2 x i32> zeroinitializer
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -981,7 +981,7 @@
 
 ; Test that we can speculate the loads around the select even when we can't
 ; fold the load completely away.
-define i32 @test78_deref(i1 %flag, i32* dereferenceable(4) %x, i32* dereferenceable(4) %y, i32* %z) {
+define i32 @test78_deref(i1 %flag, i32* dereferenceable(4) align 4 %x, i32* dereferenceable(4) align 4 %y, i32* %z) {
 ; CHECK-LABEL: @test78_deref(
 ; CHECK-NEXT:    [[X_VAL:%.*]] = load i32, i32* [[X:%.*]], align 4
 ; CHECK-NEXT:    [[Y_VAL:%.*]] = load i32, i32* [[Y:%.*]], align 4
diff --git a/llvm/test/Transforms/LICM/hoist-deref-load.ll b/llvm/test/Transforms/LICM/hoist-deref-load.ll
--- a/llvm/test/Transforms/LICM/hoist-deref-load.ll
+++ b/llvm/test/Transforms/LICM/hoist-deref-load.ll
@@ -19,7 +19,7 @@
 ; CHECK: load i32, i32* %c, align 4
 ; CHECK: for.body:
 
-define void @test1(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly nonnull dereferenceable(4) %c, i32 %n) #0 {
+define void @test1(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly nonnull dereferenceable(4) align 4 %c, i32 %n) #0 {
 entry:
   %cmp11 = icmp sgt i32 %n, 0
   br i1 %cmp11, label %for.body, label %for.end
@@ -99,7 +99,7 @@
 ; CHECK: load i32, i32* %c2, align 4
 ; CHECK: for.body:
 
-define void @test3(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly dereferenceable(12) %c, i32 %n) #0 {
+define void @test3(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* nocapture readonly dereferenceable(12) align 4 %c, i32 %n) #0 {
 entry:
   %cmp11 = icmp sgt i32 %n, 0
   br i1 %cmp11, label %for.body, label %for.end
@@ -183,7 +183,7 @@
 ; CHECK: load i32, i32* %c, align 4
 ; CHECK: for.body:
 
-define void @test5(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) %c, i32 %n) #0 {
+define void @test5(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) align 4 %c, i32 %n) #0 {
 entry:
   %not_null = icmp ne i32* %c, null
   br i1 %not_null, label %not.null, label %for.end
@@ -274,7 +274,7 @@
 
 define void @test7(i32* noalias %a, i32* %b, i32** %cptr, i32 %n) #0 {
 entry:
-  %c = load i32*, i32** %cptr, !dereferenceable !0
+  %c = load i32*, i32** %cptr, !dereferenceable !0, !align !{i64 4}
   %cmp11 = icmp sgt i32 %n, 0
   br i1 %cmp11, label %for.body, label %for.end
 
@@ -321,7 +321,7 @@
 
 define void @test8(i32* noalias %a, i32* %b, i32** %cptr, i32 %n) #0 {
 entry:
-  %c = load i32*, i32** %cptr, !dereferenceable_or_null !0
+  %c = load i32*, i32** %cptr, !dereferenceable_or_null !0, !align !{i64 4}
   %not_null = icmp ne i32* %c, null
   br i1 %not_null, label %not.null, label %for.end
 
@@ -405,7 +405,7 @@
 ; CHECK: if.then:
 ; CHECK: load i32, i32* %c, align 4
 
-define void @test10(i32* noalias %a, i32* %b, i32** dereferenceable(8) %cptr, i32 %n) #0 {
+define void @test10(i32* noalias %a, i32* %b, i32** dereferenceable(8) align 8 %cptr, i32 %n) #0 {
 entry:
   %cmp11 = icmp sgt i32 %n, 0
   br i1 %cmp11, label %for.body, label %for.end
@@ -475,7 +475,7 @@
 
 declare void @llvm.experimental.guard(i1, ...)
 
-define void @test12(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) %c, i32 %n) #0 {
+define void @test12(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) align 4 %c, i32 %n) #0 {
 ; Prove non-null ness of %c via a guard, not a branch.
 
 ; CHECK-LABEL: @test12(
@@ -560,7 +560,7 @@
 ; Check that branch by condition "null check AND something" allows to hoist the
 ; load.
 
-define void @test14(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) %c, i32 %n, i1 %dummy_cond) #0 {
+define void @test14(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) align 4 %c, i32 %n, i1 %dummy_cond) #0 {
 
 ; CHECK-LABEL: @test14
 ; CHECK: load i32, i32* %c, align 4
@@ -602,7 +602,7 @@
 ; Check that guard by condition "null check AND something" allows to hoist the
 ; load.
 
-define void @test15(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) %c, i32 %n, i1 %dummy_cond) #0 {
+define void @test15(i32* noalias %a, i32* %b, i32* dereferenceable_or_null(4) align 4 %c, i32 %n, i1 %dummy_cond) #0 {
 
 ; CHECK-LABEL: @test15
 ; CHECK: load i32, i32* %c, align 4
diff --git a/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll b/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll
--- a/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll
+++ b/llvm/test/Transforms/SimplifyCFG/SpeculativeExec.ll
@@ -119,7 +119,7 @@
   ret i8* %x10
 }
 
-define i32* @test5(i32 %a, i32 %b, i32 %c, i32* dereferenceable(10) %ptr1, i32* dereferenceable(10) %ptr2, i32** dereferenceable(10) %ptr3) {
+define i32* @test5(i32 %a, i32 %b, i32 %c, i32* dereferenceable(10) %ptr1, i32* dereferenceable(10) %ptr2, i32** dereferenceable(10) align 8 %ptr3) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[T1:%.*]] = icmp eq i32 [[B:%.*]], 0
diff --git a/llvm/test/Transforms/TailCallElim/reorder_load.ll b/llvm/test/Transforms/TailCallElim/reorder_load.ll
--- a/llvm/test/Transforms/TailCallElim/reorder_load.ll
+++ b/llvm/test/Transforms/TailCallElim/reorder_load.ll
@@ -126,7 +126,7 @@
 
 ; This load can be moved above the call because the function won't write to it
 ; and the a_arg is dereferenceable.
-define fastcc i32 @raise_load_5(i32* dereferenceable(4) %a_arg, i32 %a_len_arg, i32 %start_arg) readonly {
+define fastcc i32 @raise_load_5(i32* dereferenceable(4) align 4 %a_arg, i32 %a_len_arg, i32 %start_arg) readonly {
 ; CHECK-LABEL: @raise_load_5(
 ; CHECK-NOT: call
 ; CHECK: load i32, i32*
diff --git a/llvm/unittests/IR/FunctionTest.cpp b/llvm/unittests/IR/FunctionTest.cpp
--- a/llvm/unittests/IR/FunctionTest.cpp
+++ b/llvm/unittests/IR/FunctionTest.cpp
@@ -143,7 +143,7 @@
   FunctionType *FuncType(FunctionType::get(VoidType, false));
   std::unique_ptr<Function> Func(Function::Create(
       FuncType, GlobalValue::ExternalLinkage));
-  EXPECT_EQ(MaybeAlign(), Func->getPointerAlignment(DataLayout("")));
+  EXPECT_EQ(Align(1), Func->getPointerAlignment(DataLayout("")));
   EXPECT_EQ(Align(1), Func->getPointerAlignment(DataLayout("Fi8")));
   EXPECT_EQ(Align(1), Func->getPointerAlignment(DataLayout("Fn8")));
   EXPECT_EQ(Align(2), Func->getPointerAlignment(DataLayout("Fi16")));
@@ -153,7 +153,7 @@
 
   Func->setAlignment(Align(4));
 
-  EXPECT_EQ(MaybeAlign(), Func->getPointerAlignment(DataLayout("")));
+  EXPECT_EQ(Align(1), Func->getPointerAlignment(DataLayout("")));
   EXPECT_EQ(Align(1), Func->getPointerAlignment(DataLayout("Fi8")));
   EXPECT_EQ(Align(4), Func->getPointerAlignment(DataLayout("Fn8")));
   EXPECT_EQ(Align(2), Func->getPointerAlignment(DataLayout("Fi16")));
diff --git a/polly/test/ScopInfo/invariant_load_dereferenceable.ll b/polly/test/ScopInfo/invariant_load_dereferenceable.ll
--- a/polly/test/ScopInfo/invariant_load_dereferenceable.ll
+++ b/polly/test/ScopInfo/invariant_load_dereferenceable.ll
@@ -17,7 +17,7 @@
 
 ; CHECK-NOT: Function: foo_undereferanceable
 
-define void @foo_dereferanceable(double* %A, double* %B, i64* dereferenceable(8) %sizeA_ptr,
+define void @foo_dereferanceable(double* %A, double* %B, i64* dereferenceable(8) align 8 %sizeA_ptr,
     i32 %lb.i, i32 %lb.j, i32 %ub.i, i32 %ub.j) {
 entry:
   br label %for.i
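
Note (not part of the patch itself): the release note at the top says frontends may need to emit explicit alignment information now that dereferenceable no longer implies it. A minimal LLVM IR sketch of what that looks like, mirroring the test updates above; the function names are made up for illustration, the `align` parameter attribute covers arguments and the `!align` metadata covers loaded pointers:

  ; Before this change, alignment was (incorrectly) inferred from the pointee type.
  ; After it, the frontend spells the alignment out next to dereferenceable.
  define i32 @read_field(i32* dereferenceable(4) align 4 %p) {
  entry:
    %v = load i32, i32* %p, align 4
    ret i32 %v
  }

  define i32 @read_through(i32** %pp) {
  entry:
    ; !align plays the same role for a pointer produced by a load.
    %p = load i32*, i32** %pp, !dereferenceable !0, !align !0
    %v = load i32, i32* %p, align 4
    ret i32 %v
  }

  !0 = !{i64 4}

Without the extra `align 4` / `!align`, the speculation, PRE, and hoisting transforms exercised by the tests in this patch now have to assume an alignment of 1 and may no longer fire.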
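
For in-tree callers, the signature change mostly deletes code rather than adding it. A small C++ sketch of the caller-side pattern after this change, following the Loads.cpp and ExpandMemCmp.cpp hunks; the helper name is hypothetical and only illustrates the idiom:

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Value.h"
  #include "llvm/Support/Alignment.h"
  using namespace llvm;

  // Hypothetical helper: is Ptr known to be at least Required-aligned?
  static bool isKnownAlignedTo(const Value *Ptr, const DataLayout &DL,
                               Align Required) {
    // Before this patch: MaybeAlign MA = Ptr->getPointerAlignment(DL);
    //                    if (!MA || *MA < Required) return false;
    Align Known = Ptr->getPointerAlignment(DL); // never "unknown", always >= 1
    return Known >= Required;
  }

Align(1) is the conservative answer when nothing is known, so the MaybeAlign checks and valueOrOne() calls in callers simply disappear.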