Index: llvm/trunk/lib/Analysis/Loads.cpp
===================================================================
--- llvm/trunk/lib/Analysis/Loads.cpp
+++ llvm/trunk/lib/Analysis/Loads.cpp
@@ -71,6 +71,9 @@
     Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
   assert(isPowerOf2_32(Align));
 
+  if (isDereferenceableAndAlignedPointer(V, Align, DL))
+    return true;
+
   int64_t ByteOffset = 0;
   Value *Base = V;
   Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);
Index: llvm/trunk/lib/Transforms/Scalar/SROA.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/SROA.cpp
+++ llvm/trunk/lib/Transforms/Scalar/SROA.cpp
@@ -1192,8 +1192,7 @@
     // If this pointer is always safe to load, or if we can prove that there
     // is already a load in the block, then we can move the load to the pred
    // block.
-    if (isDereferenceablePointer(InVal, DL) ||
-        isSafeToLoadUnconditionally(InVal, MaxAlign, TI))
+    if (isSafeToLoadUnconditionally(InVal, MaxAlign, TI))
      continue;
 
     return false;
@@ -1262,8 +1261,6 @@
   Value *TValue = SI.getTrueValue();
   Value *FValue = SI.getFalseValue();
   const DataLayout &DL = SI.getModule()->getDataLayout();
-  bool TDerefable = isDereferenceablePointer(TValue, DL);
-  bool FDerefable = isDereferenceablePointer(FValue, DL);
 
   for (User *U : SI.users()) {
     LoadInst *LI = dyn_cast<LoadInst>(U);
@@ -1273,11 +1270,9 @@
     // Both operands to the select need to be dereferencable, either
     // absolutely (e.g. allocas) or at this point because we can see other
     // accesses to it.
-    if (!TDerefable &&
-        !isSafeToLoadUnconditionally(TValue, LI->getAlignment(), LI))
+    if (!isSafeToLoadUnconditionally(TValue, LI->getAlignment(), LI))
       return false;
-    if (!FDerefable &&
-        !isSafeToLoadUnconditionally(FValue, LI->getAlignment(), LI))
+    if (!isSafeToLoadUnconditionally(FValue, LI->getAlignment(), LI))
       return false;
   }
 
Index: llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ llvm/trunk/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -1141,8 +1141,6 @@
 /// the select can be loaded unconditionally.
 static bool isSafeSelectToSpeculate(SelectInst *SI) {
   const DataLayout &DL = SI->getModule()->getDataLayout();
-  bool TDerefable = isDereferenceablePointer(SI->getTrueValue(), DL);
-  bool FDerefable = isDereferenceablePointer(SI->getFalseValue(), DL);
 
   for (User *U : SI->users()) {
     LoadInst *LI = dyn_cast<LoadInst>(U);
@@ -1150,12 +1148,10 @@
 
     // Both operands to the select need to be dereferencable, either absolutely
     // (e.g. allocas) or at this point because we can see other accesses to it.
-    if (!TDerefable &&
-        !isSafeToLoadUnconditionally(SI->getTrueValue(), LI->getAlignment(),
+    if (!isSafeToLoadUnconditionally(SI->getTrueValue(), LI->getAlignment(),
                                      LI))
       return false;
-    if (!FDerefable &&
-        !isSafeToLoadUnconditionally(SI->getFalseValue(), LI->getAlignment(),
+    if (!isSafeToLoadUnconditionally(SI->getFalseValue(), LI->getAlignment(),
                                      LI))
       return false;
   }
@@ -1229,8 +1225,7 @@
 
     // If this pointer is always safe to load, or if we can prove that there is
     // already a load in the block, then we can move the load to the pred block.
-    if (isDereferenceablePointer(InVal, DL) ||
-        isSafeToLoadUnconditionally(InVal, MaxAlign, Pred->getTerminator()))
+    if (isSafeToLoadUnconditionally(InVal, MaxAlign, Pred->getTerminator()))
       continue;
 
     return false;
Index: llvm/trunk/test/Transforms/InstCombine/select.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/select.ll
+++ llvm/trunk/test/Transforms/InstCombine/select.ll
@@ -1296,6 +1296,20 @@
   ret i32 %v
 }
 
+define i32 @test78_deref(i1 %flag, i32* dereferenceable(4) %x, i32* dereferenceable(4) %y, i32* %z) {
+; Test that we can speculate the loads around the select even when we can't
+; fold the load completely away.
+; CHECK-LABEL: @test78_deref(
+; CHECK: %[[V1:.*]] = load i32, i32* %x
+; CHECK-NEXT: %[[V2:.*]] = load i32, i32* %y
+; CHECK-NEXT: %[[S:.*]] = select i1 %flag, i32 %[[V1]], i32 %[[V2]]
+; CHECK-NEXT: ret i32 %[[S]]
+entry:
+  %p = select i1 %flag, i32* %x, i32* %y
+  %v = load i32, i32* %p
+  ret i32 %v
+}
+
 define i32 @test78_neg(i1 %flag, i32* %x, i32* %y, i32* %z) {
 ; The same as @test78 but we can't speculate the load because it can trap
 ; if under-aligned.
@@ -1313,6 +1327,19 @@
   ret i32 %v
 }
 
+define i32 @test78_deref_neg(i1 %flag, i32* dereferenceable(2) %x, i32* dereferenceable(4) %y, i32* %z) {
+; The same as @test78_deref but we can't speculate the load because
+; one of the arguments is not sufficiently dereferenceable.
+; CHECK-LABEL: @test78_deref_neg(
+; CHECK: %p = select i1 %flag, i32* %x, i32* %y
+; CHECK-NEXT: %v = load i32, i32* %p
+; CHECK-NEXT: ret i32 %v
+entry:
+  %p = select i1 %flag, i32* %x, i32* %y
+  %v = load i32, i32* %p
+  ret i32 %v
+}
+
 define float @test79(i1 %flag, float* %x, i32* %y, i32* %z) {
 ; Test that we can speculate the loads around the select even when we can't
 ; fold the load completely away.
Index: llvm/trunk/test/Transforms/TailCallElim/dont_reorder_load.ll
===================================================================
--- llvm/trunk/test/Transforms/TailCallElim/dont_reorder_load.ll
+++ llvm/trunk/test/Transforms/TailCallElim/dont_reorder_load.ll
@@ -1,4 +1,4 @@
-; RUN: opt < %s -tailcallelim -S | grep call | count 3
+; RUN: opt < %s -tailcallelim -S | grep call | count 4
 ; PR4323
 
 ; Several cases where tail call elimination should not move the load above the
@@ -62,3 +62,21 @@
   %tmp10 = add i32 %tmp9, %tmp8		; <i32> [#uses=1]
   ret i32 %tmp10
 }
+
+; This load can NOT be moved above the call because %a_arg is not
+; sufficiently dereferenceable.
+define fastcc i32 @no_tailrecelim_4(i32* dereferenceable(2) %a_arg, i32 %a_len_arg, i32 %start_arg) readonly {
+entry:
+  %tmp2 = icmp sge i32 %start_arg, %a_len_arg		; <i1> [#uses=1]
+  br i1 %tmp2, label %if, label %else
+
+if:		; preds = %entry
+  ret i32 0
+
+else:		; preds = %entry
+  %tmp7 = add i32 %start_arg, 1		; <i32> [#uses=1]
+  %tmp8 = call fastcc i32 @no_tailrecelim_4(i32* %a_arg, i32 %a_len_arg, i32 %tmp7)		; <i32> [#uses=1]
+  %tmp9 = load i32, i32* %a_arg		; <i32> [#uses=1]
+  %tmp10 = add i32 %tmp9, %tmp8		; <i32> [#uses=1]
+  ret i32 %tmp10
+}
Index: llvm/trunk/test/Transforms/TailCallElim/reorder_load.ll
===================================================================
--- llvm/trunk/test/Transforms/TailCallElim/reorder_load.ll
+++ llvm/trunk/test/Transforms/TailCallElim/reorder_load.ll
@@ -122,3 +122,26 @@
   %tmp10 = add i32 %second, %tmp8		; <i32> [#uses=1]
   ret i32 %tmp10
 }
+
+; This load can be moved above the call because the function won't write to it
+; and %a_arg is dereferenceable.
+define fastcc i32 @raise_load_5(i32* dereferenceable(4) %a_arg, i32 %a_len_arg, i32 %start_arg) readonly {
+; CHECK-LABEL: @raise_load_5(
+; CHECK-NOT: call
+; CHECK: load i32, i32*
+; CHECK-NOT: call
+; CHECK: }
+entry:
+  %tmp2 = icmp sge i32 %start_arg, %a_len_arg		; <i1> [#uses=1]
+  br i1 %tmp2, label %if, label %else
+
+if:		; preds = %entry
+  ret i32 0
+
+else:		; preds = %entry
+  %tmp7 = add i32 %start_arg, 1		; <i32> [#uses=1]
+  %tmp8 = call fastcc i32 @raise_load_5(i32* %a_arg, i32 %a_len_arg, i32 %tmp7)		; <i32> [#uses=1]
+  %tmp9 = load i32, i32* %a_arg		; <i32> [#uses=1]
+  %tmp10 = add i32 %tmp9, %tmp8		; <i32> [#uses=1]
+  ret i32 %tmp10
+}
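
For reference, the same dereferenceable(4) reasoning demonstrated by @test78_deref also comes into play when the loaded pointer arrives through a phi rather than a select, which is the path simplified in SROA.cpp above. The snippet below is a minimal illustrative sketch, not part of the patch; the function name @phi_deref_sketch and its value names are hypothetical.

; Illustrative sketch only -- not part of the patch. Both incoming pointers
; carry dereferenceable(4), so isSafeToLoadUnconditionally (now checking
; isDereferenceableAndAlignedPointer first) can report each of them as safe,
; which is what permits speculating this kind of load, as in @test78_deref.
define i32 @phi_deref_sketch(i1 %flag, i32* dereferenceable(4) %x, i32* dereferenceable(4) %y) {
entry:
  br i1 %flag, label %then, label %else

then:                                             ; preds = %entry
  br label %merge

else:                                             ; preds = %entry
  br label %merge

merge:                                            ; preds = %else, %then
  %p = phi i32* [ %x, %then ], [ %y, %else ]
  %v = load i32, i32* %p
  ret i32 %v
}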