diff --git a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
@@ -0,0 +1,189 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare void @llvm.assume(i1)
+declare void @barrier()
+
+define i1 @xor_uge(i8 %x, i8 %y) {
+; CHECK-LABEL: @xor_uge(
+; CHECK-NEXT:    [[YNZ:%.*]] = icmp ne i8 [[Y:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[YNZ]])
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[XOR]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %ynz = icmp ne i8 %y, 0
+  call void @llvm.assume(i1 %ynz)
+  %xor = xor i8 %x, %y
+  %r = icmp uge i8 %xor, %x
+  ret i1 %r
+}
+
+define i1 @xor_uge_fail_maybe_zero(i8 %x, i8 %y) {
+; CHECK-LABEL: @xor_uge_fail_maybe_zero(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp uge i8 [[XOR]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, %y
+  %r = icmp uge i8 %xor, %x
+  ret i1 %r
+}
+
+define <2 x i1> @xor_ule_2(<2 x i8> %x, <2 x i8> %yy) {
+; CHECK-LABEL: @xor_ule_2(
+; CHECK-NEXT:    [[Y:%.*]] = or <2 x i8> [[YY:%.*]],
+; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[Y]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ule <2 x i8> [[XOR]], [[X]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %y = or <2 x i8> %yy,
+  %xor = xor <2 x i8> %y, %x
+  %r = icmp ule <2 x i8> %xor, %x
+  ret <2 x i1> %r
+}
+
+define i1 @xor_sle_2(i8 %xx, i8 %y, i8 %z) {
+; CHECK-LABEL: @xor_sle_2(
+; CHECK-NEXT:    [[X:%.*]] = add i8 [[XX:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[YNZ:%.*]] = icmp ne i8 [[Y:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[YNZ]])
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[X]], [[XOR]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = add i8 %xx, %z
+  %ynz = icmp ne i8 %y, 0
+  call void @llvm.assume(i1 %ynz)
+  %xor = xor i8 %x, %y
+  %r = icmp sle i8 %x, %xor
+  ret i1 %r
+}
+
+define i1 @xor_sge(i8 %xx, i8 %yy) {
+; CHECK-LABEL: @xor_sge(
+; CHECK-NEXT:    [[X:%.*]] = mul i8 [[XX:%.*]], [[XX]]
+; CHECK-NEXT:    [[Y:%.*]] = or i8 [[YY:%.*]], -128
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[Y]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sge i8 [[X]], [[XOR]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = mul i8 %xx, %xx
+  %y = or i8 %yy, 128
+  %xor = xor i8 %y, %x
+  %r = icmp sge i8 %x, %xor
+  ret i1 %r
+}
+
+define i1 @xor_ugt_2(i8 %xx, i8 %y, i8 %z) {
+; CHECK-LABEL: @xor_ugt_2(
+; CHECK-NEXT:    [[X:%.*]] = add i8 [[XX:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[YZ:%.*]] = and i8 [[Y:%.*]], 63
+; CHECK-NEXT:    [[Y1:%.*]] = or i8 [[YZ]], 64
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X]], [[Y1]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ugt i8 [[X]], [[XOR]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = add i8 %xx, %z
+  %yz = and i8 %y, 63
+  %y1 = or i8 %yz, 64
+  %xor = xor i8 %x, %y1
+  %r = icmp ugt i8 %x, %xor
+  ret i1 %r
+}
+
+define i1 @xor_ult(i8 %x) {
+; CHECK-LABEL: @xor_ult(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], 123
+; CHECK-NEXT:    [[R:%.*]] = icmp ult i8 [[XOR]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, 123
+  %r = icmp ult i8 %xor, %x
+  ret i1 %r
+}
+
+define <2 x i1> @xor_sgt(<2 x i8> %x, <2 x i8> %y) {
+; CHECK-LABEL: @xor_sgt(
+; CHECK-NEXT:    [[YZ:%.*]] = and <2 x i8> [[Y:%.*]],
+; CHECK-NEXT:    [[Y1:%.*]] = or <2 x i8> [[YZ]],
+; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[Y1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %yz = and <2 x i8> %y,
+  %y1 = or <2 x i8> %yz,
+  %xor = xor <2 x i8> %x, %y1
+  %r = icmp sgt <2 x i8> %xor, %x
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @xor_sgt_fail_no_known_msb(<2 x i8> %x, <2 x i8> %y) {
+; CHECK-LABEL: @xor_sgt_fail_no_known_msb(
+; CHECK-NEXT:    [[YZ:%.*]] = and <2 x i8> [[Y:%.*]],
+; CHECK-NEXT:    [[Y1:%.*]] = or <2 x i8> [[YZ]],
+; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[Y1]], [[X:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %yz = and <2 x i8> %y,
+  %y1 = or <2 x i8> %yz,
+  %xor = xor <2 x i8> %x, %y1
+  %r = icmp sgt <2 x i8> %xor, %x
+  ret <2 x i1> %r
+}
+
+define i1 @xor_slt_2(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @xor_slt_2(
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[X:%.*]], 88
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[XOR]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %xor = xor i8 %x, 88
+  %r = icmp slt i8 %x, %xor
+  ret i1 %r
+}
+
+define <2 x i1> @xor_sgt_intmin_2(<2 x i8> %xx, <2 x i8> %yy, <2 x i8> %z) {
+; CHECK-LABEL: @xor_sgt_intmin_2(
+; CHECK-NEXT:    [[X:%.*]] = add <2 x i8> [[XX:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[Y:%.*]] = or <2 x i8> [[YY:%.*]],
+; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i8> [[X]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt <2 x i8> [[X]], [[XOR]]
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %x = add <2 x i8> %xx, %z
+  %y = or <2 x i8> %yy,
+  %xor = xor <2 x i8> %x, %y
+  %r = icmp sgt <2 x i8> %x, %xor
+  ret <2 x i1> %r
+}
+
+define i1 @or_slt_intmin_indirect(i8 %x, i8 %C) {
+; CHECK-LABEL: @or_slt_intmin_indirect(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[C:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP]], label [[NEG:%.*]], label [[POS:%.*]]
+; CHECK:       common.ret:
+; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = phi i1 [ [[R:%.*]], [[NEG]] ], [ false, [[POS]] ]
+; CHECK-NEXT:    ret i1 [[COMMON_RET_OP]]
+; CHECK:       neg:
+; CHECK-NEXT:    [[XOR:%.*]] = xor i8 [[C]], [[X:%.*]]
+; CHECK-NEXT:    [[R]] = icmp slt i8 [[XOR]], [[X]]
+; CHECK-NEXT:    br label [[COMMON_RET:%.*]]
+; CHECK:       pos:
+; CHECK-NEXT:    tail call void @barrier()
+; CHECK-NEXT:    br label [[COMMON_RET]]
+;
+  %cmp = icmp slt i8 %C, 0
+  br i1 %cmp, label %neg, label %pos
+common.ret:
+  %common.ret.op = phi i1 [ %r, %neg ], [ false, %pos ]
+  ret i1 %common.ret.op
+neg:
+  %xor = xor i8 %C, %x
+  %r = icmp slt i8 %xor, %x
+  br label %common.ret
+pos:
+  tail call void @barrier()
+  br label %common.ret
+}