diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -670,6 +670,14 @@ unsigned BitWidth = Known.getBitWidth(); + // Refine Known set if the pointer alignment is set by assume bundles. + if (V->getType()->isPointerTy()) { + if (RetainedKnowledge RK = getKnowledgeValidInContext( + V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) { + Known.Zero.setLowBits(Log2_32(RK.ArgValue)); + } + } + // Note that the patterns below need to be kept in sync with the code // in AssumptionCache::updateAffectedValues. diff --git a/llvm/test/Analysis/BasicAA/featuretest.ll b/llvm/test/Analysis/BasicAA/featuretest.ll --- a/llvm/test/Analysis/BasicAA/featuretest.ll +++ b/llvm/test/Analysis/BasicAA/featuretest.ll @@ -128,7 +128,7 @@ ; USE_ASSUME-LABEL: @gep_distance_test3( ; USE_ASSUME-NEXT: [[C1:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 1 ; USE_ASSUME-NEXT: [[C:%.*]] = bitcast i32* [[C1]] to i8* -; USE_ASSUME-NEXT: store i8 42, i8* [[C]], align 1 +; USE_ASSUME-NEXT: store i8 42, i8* [[C]], align 4 ; USE_ASSUME-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[A]], i64 4), "nonnull"(i32* [[A]]), "align"(i32* [[A]], i64 4) ] ; USE_ASSUME-NEXT: ret i32 0 ; diff --git a/llvm/test/Analysis/ValueTracking/assume-queries-counter.ll b/llvm/test/Analysis/ValueTracking/assume-queries-counter.ll --- a/llvm/test/Analysis/ValueTracking/assume-queries-counter.ll +++ b/llvm/test/Analysis/ValueTracking/assume-queries-counter.ll @@ -2,8 +2,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -instcombine --debug-counter=assume-queries-counter-skip=0,assume-queries-counter-count=1 -S | FileCheck %s --check-prefixes=SAME,COUNTER1 -; RUN: opt < %s -instcombine --debug-counter=assume-queries-counter-skip=1,assume-queries-counter-count=2 -S | FileCheck %s --check-prefixes=SAME,COUNTER2 -; RUN: opt < %s -instcombine 
--debug-counter=assume-queries-counter-skip=2,assume-queries-counter-count=1 -S | FileCheck %s --check-prefixes=SAME,COUNTER3 +; RUN: opt < %s -instcombine --debug-counter=assume-queries-counter-skip=1,assume-queries-counter-count=6 -S | FileCheck %s --check-prefixes=SAME,COUNTER2 +; RUN: opt < %s -instcombine --debug-counter=assume-queries-counter-skip=6,assume-queries-counter-count=1 -S | FileCheck %s --check-prefixes=SAME,COUNTER3 declare i1 @get_val() declare void @llvm.assume(i1) diff --git a/llvm/test/Transforms/InstCombine/assume-align.ll b/llvm/test/Transforms/InstCombine/assume-align.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/assume-align.ll @@ -0,0 +1,80 @@ +; RUN: opt -S -passes=instcombine,simplify-cfg < %s 2>&1 | FileCheck %s + +declare void @llvm.assume(i1 noundef) + +define void @f1(i8* %a) { +; CHECK-LABEL: @f1( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 4 +; CHECK-NEXT: [[TMP0:%.*]] = ptrtoint i8* [[PTR]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 3 +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 0 +; CHECK-NEXT: br i1 [[TMP2]], label [[IF_THEN:%.*]], label [[IF_END:%.*]] +; CHECK: if.then: +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[PTR]], i64 4) ] +; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[PTR]] to i32* +; CHECK-NEXT: store i32 4, i32* [[TMP3]], align 4 +; CHECK-NEXT: br label [[IF_END]] +; CHECK: if.end: +; CHECK-NEXT: ret void +; +entry: + %ptr = getelementptr inbounds i8, i8* %a, i64 4 + %0 = ptrtoint i8* %ptr to i64 + %1 = and i64 %0, 3 + %2 = icmp eq i64 %1, 0 + br i1 %2, label %if.then, label %if.end + +if.then: ; preds = %entry + call void @llvm.assume(i1 true) [ "align"(i8* %ptr, i64 4) ] + %3 = ptrtoint i8* %ptr to i64 + %4 = and i64 %3, 3 + %5 = icmp eq i64 %4, 0 + br i1 %5, label %if.then1, label %if.else1 + +if.then1: ; preds = %if.then + %6 = bitcast i8* %ptr to i32* + store i32 4, i32* %6, align 4 + br label 
%if.end + +if.else1: ; preds = %if.then + store i8 1, i8* %ptr, align 1 + br label %if.end + +if.end: ; preds = %if.then1, %if.else1, %entry + ret void +} + +; TODO: We could fold away the branch "br i1 %3, ..." either by using a GEP, or by making getKnowledgeValidInContext aware of the alignment bundle offset, combined with improved value tracking of GEPs. + +define void @f2(i8* %a) { +; CHECK-LABEL: @f2( +; CHECK-NEXT: entry: +; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[A:%.*]], i64 32, i32 24) ] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 8 +; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint i8* [[TMP0]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], 8 +; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[TMP3]], label [[IF_THEN1:%.*]], label [[IF_ELSE1:%.*]] +; +entry: + call void @llvm.assume(i1 true) [ "align"(i8* %a, i64 32, i32 24) ] + %0 = getelementptr inbounds i8, i8* %a, i64 8 + %1 = ptrtoint i8* %0 to i64 + %2 = and i64 %1, 15 + %3 = icmp eq i64 %2, 0 + br i1 %3, label %if.then, label %if.else + +if.then: ; preds = %entry + %4 = bitcast i8* %0 to i64* + store i64 16, i64* %4, align 4 + br label %if.end + +if.else: ; preds = %entry + store i8 1, i8* %0, align 1 + br label %if.end + +if.end: ; preds = %if.else, %if.then + ret void +} +