diff --git a/llvm/test/Analysis/ValueTracking/known-non-zero-through-dom-use.ll b/llvm/test/Analysis/ValueTracking/known-non-zero-through-dom-use.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Analysis/ValueTracking/known-non-zero-through-dom-use.ll
@@ -0,0 +1,432 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=instsimplify < %s -S | FileCheck %s
+
+declare void @llvm.assume(i1)
+declare i8 @llvm.abs.i8(i8, i1)
+declare i8 @llvm.bitreverse.i8(i8)
+declare i16 @llvm.bswap.i16(i16)
+declare i8 @llvm.ctpop.i8(i8)
+declare void @use1(i1)
+
+;; Throughout, we use `X > Y || Y == 0`, which folds to `X > Y` iff X
+;; is known non-zero. We do this because many of the expressions
+;; already have hardcoded cases for folding Foo(X) == 0 -> X == 0, and
+;; we want to test explicitly that `isKnownNonZero` works.
+
+define i1 @check_neg(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_neg(
+; CHECK-NEXT:    [[Z:%.*]] = sub i8 0, [[X:%.*]]
+; CHECK-NEXT:    [[NE:%.*]] = icmp ugt i8 [[Z]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %z = sub i8 0, %x
+  %ne = icmp ugt i8 %z, 4
+  call void @llvm.assume(i1 %ne)
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_neg_fail(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_neg_fail(
+; CHECK-NEXT:    [[Z:%.*]] = sub i8 0, [[X:%.*]]
+; CHECK-NEXT:    [[Z2:%.*]] = xor i8 [[Z]], 1
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[Z2]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %z = sub i8 0, %x
+  %z2 = xor i8 %z, 1
+  %ne = icmp ne i8 %z2, 0
+  call void @llvm.assume(i1 %ne)
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_abs(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_abs(
+; CHECK-NEXT:    [[Z:%.*]] = call i8 @llvm.abs.i8(i8 [[X:%.*]], i1 true)
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[Z]], 0
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       false:
+; CHECK-NEXT:    call void @use1(i1 [[NE]])
+; CHECK-NEXT:    ret i1 [[NE]]
+;
+  %z = call i8 @llvm.abs.i8(i8 %x, i1 true)
+  %ne = icmp ne i8 %z, 0
+  br i1 %ne, label %true, label %false
+true:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+false:
+  ;; So that the branch doesn't fold to a select.
+  call void @use1(i1 %ne)
+  ret i1 %ne
+}
+
+define i1 @check_bswap(i16 %x, i16 %y) {
+; CHECK-LABEL: @check_bswap(
+; CHECK-NEXT:    [[Z:%.*]] = call i16 @llvm.bswap.i16(i16 [[X:%.*]])
+; CHECK-NEXT:    [[NE:%.*]] = icmp sgt i16 [[Z]], 3
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i16 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i16 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %z = call i16 @llvm.bswap.i16(i16 %x)
+  %ne = icmp sgt i16 %z, 3
+  call void @llvm.assume(i1 %ne)
+  %cmp0 = icmp ugt i16 %x, %y
+  %cmp1 = icmp eq i16 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_bitreverse(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_bitreverse(
+; CHECK-NEXT:    [[Z:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[X:%.*]])
+; CHECK-NEXT:    [[NE:%.*]] = icmp slt i8 [[Z]], -4
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       false:
+; CHECK-NEXT:    [[RT:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    call void @use1(i1 [[RT]])
+; CHECK-NEXT:    ret i1 [[RT]]
+; CHECK:       true:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R1:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    [[RF:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[R1]], [[RF]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %z = call i8 @llvm.bitreverse.i8(i8 %x)
+  %ne = icmp slt i8 %z, -4
+  br i1 %ne, label %true, label %false
+false:
+  ;; This is mostly to test that we don't constant fold this the wrong
+  ;; way (to false), but it should in fact be foldable to true. Maybe
+  ;; an `isKnownZero` that goes through the same level of analysis as
+  ;; `isKnownNonZero` would be useful.
+  %rt = icmp eq i8 %x, 0
+  ;; So that the branch doesn't fold to a select.
+  call void @use1(i1 %rt)
+  ret i1 %rt
+true:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r1 = or i1 %cmp0, %cmp1
+  %rf = icmp eq i8 %x, 0
+  %r = or i1 %r1, %rf
+  ret i1 %r
+}
+
+define i1 @check_ctpop(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_ctpop(
+; CHECK-NEXT:    [[Z:%.*]] = call i8 @llvm.ctpop.i8(i8 [[X:%.*]])
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[Z]], 0
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       false:
+; CHECK-NEXT:    call void @use1(i1 [[NE]])
+; CHECK-NEXT:    ret i1 [[NE]]
+;
+  %z = call i8 @llvm.ctpop.i8(i8 %x)
+  %ne = icmp ne i8 %z, 0
+  br i1 %ne, label %true, label %false
+true:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+false:
+  ;; So that the branch doesn't fold to a select.
+  call void @use1(i1 %ne)
+  ret i1 %ne
+}
+
+define i1 @check_ctpop_fail(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_ctpop_fail(
+; CHECK-NEXT:    [[X1:%.*]] = add i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[X2:%.*]] = and i8 [[X1]], -43
+; CHECK-NEXT:    [[Z:%.*]] = call i8 @llvm.ctpop.i8(i8 [[X2]])
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[Z]], 0
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       false:
+; CHECK-NEXT:    call void @use1(i1 [[NE]])
+; CHECK-NEXT:    ret i1 [[NE]]
+;
+  %x1 = add i8 %x, 123
+  %x2 = and i8 %x1, 213
+  %z = call i8 @llvm.ctpop.i8(i8 %x2)
+  %ne = icmp ne i8 %z, 0
+  br i1 %ne, label %true, label %false
+true:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+false:
+  ;; So that the branch doesn't fold to a select.
+  call void @use1(i1 %ne)
+  ret i1 %ne
+}
+
+define i1 @check_and(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_and(
+; CHECK-NEXT:    [[Z:%.*]] = and i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[Z]], 0
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       false:
+; CHECK-NEXT:    [[RT:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    call void @use1(i1 [[RT]])
+; CHECK-NEXT:    ret i1 [[RT]]
+;
+  %z = and i8 %x, 123
+  %ne = icmp ne i8 %z, 0
+  br i1 %ne, label %true, label %false
+true:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+false:
+  ;; This is mostly to test that we don't constant fold this the wrong
+  ;; way (to false), but it should in fact be foldable to true. Maybe
+  ;; an `isKnownZero` that goes through the same level of analysis as
+  ;; `isKnownNonZero` would be useful.
+  %rt = icmp eq i8 %x, 0
+  ;; So that the branch doesn't fold to a select.
+  call void @use1(i1 %rt)
+  ret i1 %rt
+}
+
+define i1 @check_and_fail(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_and_fail(
+; CHECK-NEXT:    [[Z:%.*]] = and i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[NE:%.*]] = icmp slt i8 [[Z]], 5
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       false:
+; CHECK-NEXT:    [[RT:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    call void @use1(i1 [[RT]])
+; CHECK-NEXT:    ret i1 [[RT]]
+;
+  %z = and i8 %x, 123
+  %ne = icmp slt i8 %z, 5
+  br i1 %ne, label %true, label %false
+true:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+false:
+  ;; This is mostly to test that we don't constant fold this the wrong
+  ;; way (to false), but it should in fact be foldable to true. Maybe
+  ;; an `isKnownZero` that goes through the same level of analysis as
+  ;; `isKnownNonZero` would be useful.
+  %rt = icmp eq i8 %x, 0
+  ;; So that the branch doesn't fold to a select.
+  call void @use1(i1 %rt)
+  ret i1 %rt
+}
+
+define i1 @check_and_fail2(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_and_fail2(
+; CHECK-NEXT:    [[Z:%.*]] = and i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[Z]], 0
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       false:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+; CHECK:       true:
+; CHECK-NEXT:    call void @use1(i1 [[NE]])
+; CHECK-NEXT:    ret i1 [[NE]]
+;
+  %z = and i8 %x, 123
+  %ne = icmp ne i8 %z, 0
+  br i1 %ne, label %true, label %false
+false:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+true:
+  ;; So that the branch doesn't fold to a select.
+  call void @use1(i1 %ne)
+  ret i1 %ne
+}
+
+define i1 @check_and_neg(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_and_neg(
+; CHECK-NEXT:    [[Z:%.*]] = and i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[Z2:%.*]] = sub i8 0, [[Z]]
+; CHECK-NEXT:    [[NE:%.*]] = icmp sgt i8 [[Z2]], -1
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[RT:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    call void @use1(i1 [[RT]])
+; CHECK-NEXT:    ret i1 [[RT]]
+; CHECK:       false:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %z = and i8 %x, 123
+  %z2 = sub i8 0, %z
+  %ne = icmp sgt i8 %z2, -1
+  br i1 %ne, label %true, label %false
+true:
+  ;; This is mostly to test that we don't constant fold this the wrong
+  ;; way (to false), but it should in fact be foldable to true. Maybe
+  ;; an `isKnownZero` that goes through the same level of analysis as
+  ;; `isKnownNonZero` would be useful.
+  %rt = icmp eq i8 %x, 0
+  ;; So that the branch doesn't fold to a select.
+  call void @use1(i1 %rt)
+  ret i1 %rt
+false:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_and_ctpop_neg(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_and_ctpop_neg(
+; CHECK-NEXT:    [[Z:%.*]] = and i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[Z2:%.*]] = sub i8 0, [[Z]]
+; CHECK-NEXT:    [[Z3:%.*]] = call i8 @llvm.ctpop.i8(i8 [[Z2]])
+; CHECK-NEXT:    [[NE:%.*]] = icmp eq i8 [[Z3]], 0
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[RT:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    call void @use1(i1 [[RT]])
+; CHECK-NEXT:    ret i1 [[RT]]
+; CHECK:       false:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %z = and i8 %x, 123
+  %z2 = sub i8 0, %z
+  %z3 = call i8 @llvm.ctpop.i8(i8 %z2)
+  %ne = icmp eq i8 %z3, 0
+  br i1 %ne, label %true, label %false
+true:
+  ;; This is mostly to test that we don't constant fold this the wrong
+  ;; way (to false), but it should in fact be foldable to true. Maybe
+  ;; an `isKnownZero` that goes through the same level of analysis as
+  ;; `isKnownNonZero` would be useful.
+  %rt = icmp eq i8 %x, 0
+  ;; So that the branch doesn't fold to a select.
+  call void @use1(i1 %rt)
+  ret i1 %rt
+false:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_and_ctpop_neg_fail(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_and_ctpop_neg_fail(
+; CHECK-NEXT:    [[Z:%.*]] = and i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[Z2:%.*]] = sub i8 0, [[Z]]
+; CHECK-NEXT:    [[Z3:%.*]] = call i8 @llvm.ctpop.i8(i8 [[Z2]])
+; CHECK-NEXT:    [[NE:%.*]] = icmp sle i8 [[Z3]], 0
+; CHECK-NEXT:    br i1 [[NE]], label [[TRUE:%.*]], label [[FALSE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[RT:%.*]] = icmp eq i8 [[X]], 0
+; CHECK-NEXT:    call void @use1(i1 [[RT]])
+; CHECK-NEXT:    ret i1 [[RT]]
+; CHECK:       false:
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %z = and i8 %x, 123
+  %z2 = sub i8 0, %z
+  %z3 = call i8 @llvm.ctpop.i8(i8 %z2)
+  %ne = icmp sle i8 %z3, 0
+  br i1 %ne, label %true, label %false
+true:
+  ;; This is mostly to test that we don't constant fold this the wrong
+  ;; way (to false), but it should in fact be foldable to true. Maybe
+  ;; an `isKnownZero` that goes through the same level of analysis as
+  ;; `isKnownNonZero` would be useful.
+  %rt = icmp eq i8 %x, 0
+  ;; So that the branch doesn't fold to a select.
+  call void @use1(i1 %rt)
+  ret i1 %rt
+false:
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}
+
+define i1 @check_and_ctpop_neg_bitreverse(i8 %x, i8 %y) {
+; CHECK-LABEL: @check_and_ctpop_neg_bitreverse(
+; CHECK-NEXT:    [[Z:%.*]] = and i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[Z2:%.*]] = sub i8 0, [[Z]]
+; CHECK-NEXT:    [[Z3:%.*]] = call i8 @llvm.ctpop.i8(i8 [[Z2]])
+; CHECK-NEXT:    [[Z4:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[Z3]])
+; CHECK-NEXT:    [[NE:%.*]] = icmp ne i8 [[Z4]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NE]])
+; CHECK-NEXT:    [[CMP0:%.*]] = icmp ugt i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP1:%.*]] = icmp eq i8 [[Y]], 0
+; CHECK-NEXT:    [[R:%.*]] = or i1 [[CMP0]], [[CMP1]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %z = and i8 %x, 123
+  %z2 = sub i8 0, %z
+  %z3 = call i8 @llvm.ctpop.i8(i8 %z2)
+  %z4 = call i8 @llvm.bitreverse.i8(i8 %z3)
+  %ne = icmp ne i8 %z4, 0
+  call void @llvm.assume(i1 %ne)
+  %cmp0 = icmp ugt i8 %x, %y
+  %cmp1 = icmp eq i8 %y, 0
+  %r = or i1 %cmp0, %cmp1
+  ret i1 %r
+}