diff --git a/llvm/test/Transforms/InstCombine/and-or-xor-lowbit-simplified.ll b/llvm/test/Transforms/InstCombine/and-or-xor-lowbit-simplified.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/and-or-xor-lowbit-simplified.ll
@@ -0,0 +1,345 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+
+define i1 @cmp_ugt_0_sub_and_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_ugt_0_sub_and_eval(
+; CHECK-NEXT:    [[Y:%.*]] = add i32 [[X:%.*]], 1
+; CHECK-NEXT:    [[Z:%.*]] = and i32 [[Y]], [[X]]
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[Z]], 1
+; CHECK-NEXT:    [[E:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT:    ret i1 [[E]]
+;
+  %C1 = or i32 %C, 129
+  %y = sub i32 %x, %C1
+  %z = and i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp ugt i32 %b, 0
+  ret i1 %e
+}
+
+define i1 @cmp_eq_0_sub2_and_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_eq_0_sub2_and_eval(
+; CHECK-NEXT:    ret i1 true
+;
+  %C1 = or i32 %C, 19
+  %y = sub i32 %C1, %x
+  %z = and i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp eq i32 %b, 0
+  ret i1 %e
+}
+
+define i1 @cmp_ne_0_sub_xor_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_ne_0_sub_xor_eval(
+; CHECK-NEXT:    ret i1 true
+;
+  %C1 = or i32 %C, 13
+  %y = sub i32 %x, %C1
+  %z = xor i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp ne i32 %b, 0
+  ret i1 %e
+}
+
+define i1 @cmp_sgt_0_add_or_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_sgt_0_add_or_eval(
+; CHECK-NEXT:    ret i1 true
+;
+  %C1 = or i32 %C, 9
+  %y = add i32 %x, %C1
+  %z = or i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp sgt i32 %b, 0
+  ret i1 %e
+}
+
+define i1 @cmp_ne_0_sub_or_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_ne_0_sub_or_eval(
+; CHECK-NEXT:    ret i1 true
+;
+  %C1 = or i32 %C, 5
+  %y = sub i32 %x, %C1
+  %z = or i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp ne i32 %b, 0
+  ret i1 %e
+}
+
+define i1 @cmp_eq_0_sub2_or_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_eq_0_sub2_or_eval(
+; CHECK-NEXT:    ret i1 false
+;
+  %C1 = or i32 %C, 3
+  %y = sub i32 %C1, %x
+  %z = or i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp eq i32 %b, 0
+  ret i1 %e
+}
+
+define i1 @cmp_ne_0_add_and_no_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_ne_0_add_and_no_eval(
+; CHECK-NEXT:    [[Y:%.*]] = add i32 [[C:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = and i32 [[Y]], [[X]]
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[Z]], 1
+; CHECK-NEXT:    [[E:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT:    ret i1 [[E]]
+;
+  %C1 = or i32 %C, 132
+  %y = add i32 %C1, %x
+  %z = and i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp ne i32 %b, 0
+  ret i1 %e
+}
+
+
+define i1 @cmp_eq_0_sub2_and_no_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_eq_0_sub2_and_no_eval(
+; CHECK-NEXT:    [[Y:%.*]] = sub i32 [[C:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = and i32 [[Y]], [[X]]
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[Z]], 1
+; CHECK-NEXT:    [[E:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT:    ret i1 [[E]]
+;
+  %C1 = or i32 %C, 8
+  %y = sub i32 %C1, %x
+  %z = and i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp eq i32 %b, 0
+  ret i1 %e
+}
+
+
+define i1 @cmp_ne_0_sub_xor_no_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_ne_0_sub_xor_no_eval(
+; CHECK-NEXT:    [[Y:%.*]] = sub i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = xor i32 [[Y]], [[X]]
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[Z]], 1
+; CHECK-NEXT:    [[E:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT:    ret i1 [[E]]
+;
+  %C1 = or i32 %C, 2
+  %y = sub i32 %x, %C1
+  %z = xor i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp ne i32 %b, 0
+  ret i1 %e
+}
+
+define i1 @cmp_sgt_0_sub2_xor_no_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_sgt_0_sub2_xor_no_eval(
+; CHECK-NEXT:    [[Y:%.*]] = sub i32 [[C:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = xor i32 [[Y]], [[X]]
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[Z]], 1
+; CHECK-NEXT:    [[E:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT:    ret i1 [[E]]
+;
+  %C1 = or i32 %C, 24
+  %y = sub i32 %C1, %x
+  %z = xor i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp sgt i32 %b, 0
+  ret i1 %e
+}
+
+define i1 @cmp_ne_0_sub_or_no_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_ne_0_sub_or_no_eval(
+; CHECK-NEXT:    [[Y:%.*]] = sub i32 [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = or i32 [[Y]], [[X]]
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[Z]], 1
+; CHECK-NEXT:    [[E:%.*]] = icmp ne i32 [[B]], 0
+; CHECK-NEXT:    ret i1 [[E]]
+;
+  %C1 = or i32 %C, 4
+  %y = sub i32 %x, %C1
+  %z = or i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp ne i32 %b, 0
+  ret i1 %e
+}
+
+define i1 @cmp_eq_0_sub2_or_no_eval(i32 %x, i32 %C) {
+; CHECK-LABEL: @cmp_eq_0_sub2_or_no_eval(
+; CHECK-NEXT:    [[Y:%.*]] = sub i32 [[C:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = or i32 [[Y]], [[X]]
+; CHECK-NEXT:    [[B:%.*]] = and i32 [[Z]], 1
+; CHECK-NEXT:    [[E:%.*]] = icmp eq i32 [[B]], 0
+; CHECK-NEXT:    ret i1 [[E]]
+;
+  %C1 = or i32 %C, 128
+  %y = sub i32 %C1, %x
+  %z = or i32 %x, %y
+  %b = and i32 %z, 1
+  %e = icmp eq i32 %b, 0
+  ret i1 %e
+}
+
+define <2 x i1> @cmp_ne_0_add_and_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_ne_0_add_and_eval_vec(
+; CHECK-NEXT:    [[Y:%.*]] = add <2 x i32> [[X:%.*]], <i32 1, i32 1>
+; CHECK-NEXT:    [[Z:%.*]] = and <2 x i32> [[Y]], [[X]]
+; CHECK-NEXT:    [[E:%.*]] = trunc <2 x i32> [[Z]] to <2 x i1>
+; CHECK-NEXT:    ret <2 x i1> [[E]]
+;
+  %C1 = or <2 x i32> %C, <i32 9, i32 9>
+  %y = add <2 x i32> %C1, %x
+  %z = and <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp ne <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+
+define <2 x i1> @cmp_eq_0_sub2_and_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_eq_0_sub2_and_eval_vec(
+; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
+;
+  %C1 = or <2 x i32> %C, <i32 19, i32 19>
+  %y = sub <2 x i32> %C1, %x
+  %z = and <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp eq <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+define <2 x i1> @cmp_ne_0_sub_xor_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_ne_0_sub_xor_eval_vec(
+; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
+;
+  %C1 = or <2 x i32> %C, <i32 13, i32 13>
+  %y = sub <2 x i32> %x, %C1
+  %z = xor <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp ne <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+define <2 x i1> @cmp_sgt_0_sub2_xor_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_sgt_0_sub2_xor_eval_vec(
+; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
+;
+  %C1 = or <2 x i32> %C, <i32 25, i32 25>
+  %y = sub <2 x i32> %C1, %x
+  %z = xor <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp sgt <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+define <2 x i1> @cmp_sgt_0_add_or_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_sgt_0_add_or_eval_vec(
+; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
+;
+  %C1 = or <2 x i32> %C, <i32 9, i32 9>
+  %y = add <2 x i32> %x, %C1
+  %z = or <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp sgt <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+define <2 x i1> @cmp_eq_0_sub2_or_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_eq_0_sub2_or_eval_vec(
+; CHECK-NEXT:    ret <2 x i1> zeroinitializer
+;
+  %C1 = or <2 x i32> %C, <i32 3, i32 3>
+  %y = sub <2 x i32> %C1, %x
+  %z = or <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp eq <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+define <2 x i1> @cmp_ne_0_add_and_no_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_ne_0_add_and_no_eval_vec(
+; CHECK-NEXT:    [[Y:%.*]] = add <2 x i32> [[C:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = and <2 x i32> [[Y]], [[X]]
+; CHECK-NEXT:    [[E:%.*]] = trunc <2 x i32> [[Z]] to <2 x i1>
+; CHECK-NEXT:    ret <2 x i1> [[E]]
+;
+  %C1 = or <2 x i32> %C, <i32 132, i32 132>
+  %y = add <2 x i32> %C1, %x
+  %z = and <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp ne <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+define <2 x i1> @cmp_ugt_0_sub_and_no_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_ugt_0_sub_and_no_eval_vec(
+; CHECK-NEXT:    [[Y:%.*]] = sub <2 x i32> [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = and <2 x i32> [[Y]], [[X]]
+; CHECK-NEXT:    [[E:%.*]] = trunc <2 x i32> [[Z]] to <2 x i1>
+; CHECK-NEXT:    ret <2 x i1> [[E]]
+;
+  %C1 = or <2 x i32> %C, <i32 130, i32 130>
+  %y = sub <2 x i32> %x, %C1
+  %z = and <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp ugt <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+define <2 x i1> @cmp_eq_0_add_xor_no_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_eq_0_add_xor_no_eval_vec(
+; CHECK-NEXT:    [[Y:%.*]] = add <2 x i32> [[C:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = xor <2 x i32> [[Y]], [[X]]
+; CHECK-NEXT:    [[B:%.*]] = and <2 x i32> [[Z]], <i32 1, i32 1>
+; CHECK-NEXT:    [[E:%.*]] = icmp eq <2 x i32> [[B]], zeroinitializer
+; CHECK-NEXT:    ret <2 x i1> [[E]]
+;
+  %C1 = or <2 x i32> %C, <i32 6, i32 6>
+  %y = add <2 x i32> %x, %C1
+  %z = xor <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp eq <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+define <2 x i1> @cmp_ne_0_sub_xor_no_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_ne_0_sub_xor_no_eval_vec(
+; CHECK-NEXT:    [[Y:%.*]] = sub <2 x i32> [[X:%.*]], [[C:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = xor <2 x i32> [[Y]], [[X]]
+; CHECK-NEXT:    [[E:%.*]] = trunc <2 x i32> [[Z]] to <2 x i1>
+; CHECK-NEXT:    ret <2 x i1> [[E]]
+;
+  %C1 = or <2 x i32> %C, <i32 2, i32 2>
+  %y = sub <2 x i32> %x, %C1
+  %z = xor <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp ne <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+define <2 x i1> @cmp_sgt_0_add_or_no_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_sgt_0_add_or_no_eval_vec(
+; CHECK-NEXT:    [[Y:%.*]] = add <2 x i32> [[C:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = or <2 x i32> [[Y]], [[X]]
+; CHECK-NEXT:    [[E:%.*]] = trunc <2 x i32> [[Z]] to <2 x i1>
+; CHECK-NEXT:    ret <2 x i1> [[E]]
+;
+  %C1 = or <2 x i32> %C, <i32 10, i32 10>
+  %y = add <2 x i32> %x, %C1
+  %z = or <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp sgt <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}
+
+define <2 x i1> @cmp_eq_0_sub2_or_no_eval_vec(<2 x i32> %x, <2 x i32> %C) {
+; CHECK-LABEL: @cmp_eq_0_sub2_or_no_eval_vec(
+; CHECK-NEXT:    [[Y:%.*]] = sub <2 x i32> [[C:%.*]], [[X:%.*]]
+; CHECK-NEXT:    [[Z:%.*]] = or <2 x i32> [[Y]], [[X]]
+; CHECK-NEXT:    [[B:%.*]] = and <2 x i32> [[Z]], <i32 1, i32 1>
+; CHECK-NEXT:    [[E:%.*]] = icmp eq <2 x i32> [[B]], zeroinitializer
+; CHECK-NEXT:    ret <2 x i1> [[E]]
+;
+  %C1 = or <2 x i32> %C, <i32 128, i32 128>
+  %y = sub <2 x i32> %C1, %x
+  %z = or <2 x i32> %x, %y
+  %b = and <2 x i32> %z, <i32 1, i32 1>
+  %e = icmp eq <2 x i32> %b, <i32 0, i32 0>
+  ret <2 x i1> %e
+}