diff --git a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll
@@ -0,0 +1,651 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare void @llvm.assume(i1)
+declare i8 @llvm.ctpop.i8(i8)
+declare i8 @llvm.umin.i8(i8, i8)
+declare i8 @llvm.umax.i8(i8, i8)
+declare i8 @llvm.smin.i8(i8, i8)
+declare i8 @llvm.smax.i8(i8, i8)
+declare i8 @llvm.bitreverse.i8(i8)
+declare void @use.i8(i8)
+declare void @use.i16(i16)
+define i1 @src_is_mask_zext(i16 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_zext(
+; CHECK-NEXT:    [[X:%.*]] = xor i16 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[M_IN:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = zext i8 [[M_IN]] to i16
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i16 %x_in, 123
+  %m_in = lshr i8 -1, %y
+  %mask = zext i8 %m_in to i16
+
+  %and = and i16 %x, %mask
+  %r = icmp eq i16 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_mask_zext_fail_not_mask(i16 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_zext_fail_not_mask(
+; CHECK-NEXT:    [[X:%.*]] = xor i16 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[M_IN:%.*]] = lshr i8 -2, [[Y:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = zext i8 [[M_IN]] to i16
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i16 %x_in, 123
+  %m_in = lshr i8 -2, %y
+  %mask = zext i8 %m_in to i16
+
+  %and = and i16 %x, %mask
+  %r = icmp eq i16 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_mask_sext(i16 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_sext(
+; CHECK-NEXT:    [[X:%.*]] = xor i16 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[M_IN:%.*]] = lshr i8 31, [[Y:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = zext i8 [[M_IN]] to i16
+; CHECK-NEXT:    [[NOTMASK:%.*]] = xor i16 [[MASK]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X]], [[NOTMASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i16 %x_in, 123
+  %m_in = lshr i8 31, %y
+  %mask = sext i8 %m_in to i16
+  %notmask = xor i16 %mask, -1
+
+  %and = and i16 %notmask, %x
+  %r = icmp eq i16 %and, 0
+  ret i1 %r
+}
+
+define i1 @src_is_mask_sext_fail_multiuse(i16 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_sext_fail_multiuse(
+; CHECK-NEXT:    [[X:%.*]] = xor i16 [[X_IN:%.*]], 122
+; CHECK-NEXT:    [[M_IN:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[M_IN]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = sext i8 [[TMP1]] to i16
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X]], [[NOTMASK]]
+; CHECK-NEXT:    call void @use.i16(i16 [[AND]])
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i16 %x_in, 123
+  %m_in = lshr i8 -1, %y
+  %mask = sext i8 %m_in to i16
+  %notmask = xor i16 %mask, -1
+
+  %and = and i16 %notmask, %x
+  call void @use.i16(i16 %and)
+  %r = icmp eq i16 %and, 0
+  ret i1 %r
+}
+
+define i1 @src_is_mask_and(i8 %x_in, i8 %y, i8 %z) {
+; CHECK-LABEL: @src_is_mask_and(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[MY:%.*]] = lshr i8 7, [[Y:%.*]]
+; CHECK-NEXT:    [[MZ:%.*]] = lshr i8 -1, [[Z:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = and i8 [[MY]], [[MZ]]
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X]], [[AND]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %my = ashr i8 7, %y
+  %mz = lshr i8 -1, %z
+  %mask = and i8 %my, %mz
+
+  %and = and i8 %x, %mask
+  %r = icmp eq i8 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_and_fail_mixed(i8 %x_in, i8 %y, i8 %z) {
+; CHECK-LABEL: @src_is_mask_and_fail_mixed(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[MY:%.*]] = ashr i8 -8, [[Y:%.*]]
+; CHECK-NEXT:    [[MZ:%.*]] = lshr i8 -1, [[Z:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = and i8 [[MY]], [[MZ]]
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X]], [[AND]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %my = ashr i8 -8, %y
+  %mz = lshr i8 -1, %z
+  %mask = and i8 %my, %mz
+
+  %and = and i8 %x, %mask
+  %r = icmp eq i8 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_or(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_or(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[MY:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = and i8 [[MY]], 7
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MASK]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X]], [[AND]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %my = lshr i8 -1, %y
+  %mask = and i8 %my, 7
+
+  %and = and i8 %mask, %x
+  %r = icmp eq i8 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_xor(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_xor(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[MASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %mask = xor i8 %y, %y_m1
+  %and = and i8 %x, %mask
+  %r = icmp ne i8 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_mask_xor_fail_notmask(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_xor_fail_notmask(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1_NOT:%.*]] = sub i8 0, [[Y:%.*]]
+; CHECK-NEXT:    [[NOTMASK:%.*]] = xor i8 [[Y_M1_NOT]], [[Y]]
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %mask = xor i8 %y, %y_m1
+  %notmask = xor i8 %mask, -1
+  %and = and i8 %x, %notmask
+  %r = icmp ne i8 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_mask_select(i8 %x_in, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_is_mask_select(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT:    [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MASK]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %ymask = xor i8 %y, %y_m1
+  %mask = select i1 %cond, i8 %ymask, i8 15
+
+  %and = and i8 %mask, %x
+  %r = icmp ne i8 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_mask_select_fail_wrong_pattern(i8 %x_in, i8 %y, i1 %cond, i8 %z) {
+; CHECK-LABEL: @src_is_mask_select_fail_wrong_pattern(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT:    [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MASK]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], [[Z:%.*]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %ymask = xor i8 %y, %y_m1
+  %mask = select i1 %cond, i8 %ymask, i8 15
+
+  %and = and i8 %mask, %x
+  %r = icmp ne i8 %and, %z
+  ret i1 %r
+}
+
+define i1 @src_is_mask_shl_lshr(i8 %x_in, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_is_mask_shl_lshr(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 122
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT:    [[NOTMASK:%.*]] = xor i8 [[TMP1]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %m_shl = shl i8 -1, %y
+  %mask = lshr i8 %m_shl, %y
+  %notmask = xor i8 %mask, -1
+
+  %and = and i8 %x, %notmask
+  %r = icmp ne i8 0, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_shl_lshr_fail_not_allones(i8 %x_in, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_is_mask_shl_lshr_fail_not_allones(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = and i8 [[TMP1]], -2
+; CHECK-NEXT:    [[NOTMASK:%.*]] = xor i8 [[MASK]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %m_shl = shl i8 -2, %y
+  %mask = lshr i8 %m_shl, %y
+  %notmask = xor i8 %mask, -1
+
+  %and = and i8 %x, %notmask
+  %r = icmp ne i8 0, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_lshr(i8 %x_in, i8 %y, i8 %z, i1 %cond) {
+; CHECK-LABEL: @src_is_mask_lshr(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT:    [[SMASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
+; CHECK-NEXT:    [[MASK:%.*]] = lshr i8 [[SMASK]], [[Z:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MASK]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X]], [[AND]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %ymask = xor i8 %y, %y_m1
+  %smask = select i1 %cond, i8 %ymask, i8 15
+  %mask = lshr i8 %smask, %z
+  %and = and i8 %mask, %x
+  %r = icmp ne i8 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_ashr(i8 %x_in, i8 %y, i8 %z, i1 %cond) {
+; CHECK-LABEL: @src_is_mask_ashr(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT:    [[SMASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15
+; CHECK-NEXT:    [[MASK:%.*]] = ashr i8 [[SMASK]], [[Z:%.*]]
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %ymask = xor i8 %y, %y_m1
+  %smask = select i1 %cond, i8 %ymask, i8 15
+  %mask = ashr i8 %smask, %z
+  %and = and i8 %x, %mask
+  %r = icmp ult i8 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_mask_p2_m1(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_p2_m1(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[P2ORZ:%.*]] = shl i8 2, [[Y:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = add i8 [[P2ORZ]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MASK]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %p2orz = shl i8 2, %y
+  %mask = add i8 %p2orz, -1
+  %and = and i8 %mask, %x
+  %r = icmp ult i8 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_mask_umax(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_umax(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT:    [[MASK:%.*]] = call i8 @llvm.umax.i8(i8 [[YMASK]], i8 3)
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %ymask = xor i8 %y, %y_m1
+  %mask = call i8 @llvm.umax.i8(i8 %ymask, i8 3)
+
+  %and = and i8 %x, %mask
+  %r = icmp ugt i8 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_umin(i8 %x_in, i8 %y, i8 %z) {
+; CHECK-LABEL: @src_is_mask_umin(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT:    [[ZMASK:%.*]] = lshr i8 15, [[Z:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = call i8 @llvm.umin.i8(i8 [[YMASK]], i8 [[ZMASK]])
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MASK]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %ymask = xor i8 %y, %y_m1
+  %zmask = lshr i8 15, %z
+  %mask = call i8 @llvm.umin.i8(i8 %ymask, i8 %zmask)
+
+  %and = and i8 %mask, %x
+  %r = icmp ugt i8 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_umin_fail_mismatch(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_umin_fail_mismatch(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT:    [[MASK:%.*]] = call i8 @llvm.umin.i8(i8 [[YMASK]], i8 -32)
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MASK]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %ymask = xor i8 %y, %y_m1
+  %mask = call i8 @llvm.umin.i8(i8 %ymask, i8 -32)
+
+  %and = and i8 %mask, %x
+  %r = icmp ugt i8 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_smax(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_smax(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT:    [[MASK:%.*]] = call i8 @llvm.smax.i8(i8 [[YMASK]], i8 -1)
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %ymask = xor i8 %y, %y_m1
+  %mask = call i8 @llvm.smax.i8(i8 %ymask, i8 -1)
+
+  %and = and i8 %x, %mask
+  %r = icmp uge i8 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_mask_smin(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_smin(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT:    [[MASK:%.*]] = call i8 @llvm.smin.i8(i8 [[YMASK]], i8 0)
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[MASK]], [[X]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %y_m1 = add i8 %y, -1
+  %ymask = xor i8 %y, %y_m1
+  %mask = call i8 @llvm.smin.i8(i8 %ymask, i8 0)
+
+  %and = and i8 %mask, %x
+  %r = icmp uge i8 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_mask_bitreverse_not_mask(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_mask_bitreverse_not_mask(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[NMASK:%.*]] = shl i8 -1, [[Y:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[NMASK]])
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %nmask = shl i8 -1, %y
+  %mask = call i8 @llvm.bitreverse.i8(i8 %nmask)
+
+  %and = and i8 %x, %mask
+  %r = icmp ule i8 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_notmask_sext(i16 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_notmask_sext(
+; CHECK-NEXT:    [[X:%.*]] = xor i16 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[M_IN:%.*]] = shl i8 -8, [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[M_IN]], -1
+; CHECK-NEXT:    [[MASK:%.*]] = sext i8 [[TMP1]] to i16
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i16 %x_in, 123
+  %m_in = shl i8 -8, %y
+  %nmask = sext i8 %m_in to i16
+  %mask = xor i16 %nmask, -1
+  %and = and i16 %mask, %x
+  %r = icmp ule i16 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_notmask_shl(i8 %x_in, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_is_notmask_shl(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 122
+; CHECK-NEXT:    [[NMASK:%.*]] = shl i8 -1, [[Y:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[NMASK]])
+; CHECK-NEXT:    [[NOTMASK0:%.*]] = xor i8 [[MASK]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = select i1 [[COND:%.*]], i8 [[NOTMASK0]], i8 -8
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %nmask = shl i8 -1, %y
+  %mask = call i8 @llvm.bitreverse.i8(i8 %nmask)
+  %notmask0 = xor i8 %mask, -1
+  %notmask = select i1 %cond, i8 %notmask0, i8 -8
+  %and = and i8 %x, %notmask
+  %r = icmp eq i8 %and, 0
+  ret i1 %r
+}
+
+define i1 @src_is_notmask_shl_fail_multiuse_invert(i8 %x_in, i8 %y, i1 %cond) {
+; CHECK-LABEL: @src_is_notmask_shl_fail_multiuse_invert(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 122
+; CHECK-NEXT:    [[NMASK:%.*]] = shl i8 -1, [[Y:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[NMASK]])
+; CHECK-NEXT:    [[NOTMASK0:%.*]] = xor i8 [[MASK]], -1
+; CHECK-NEXT:    [[NOTMASK:%.*]] = select i1 [[COND:%.*]], i8 [[NOTMASK0]], i8 -8
+; CHECK-NEXT:    call void @use.i8(i8 [[NOTMASK]])
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %nmask = shl i8 -1, %y
+  %mask = call i8 @llvm.bitreverse.i8(i8 %nmask)
+  %notmask0 = xor i8 %mask, -1
+  %notmask = select i1 %cond, i8 %notmask0, i8 -8
+  call void @use.i8(i8 %notmask)
+  %and = and i8 %x, %notmask
+  %r = icmp eq i8 %and, 0
+  ret i1 %r
+}
+
+define i1 @src_is_notmask_lshr_shl(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_notmask_lshr_shl(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[X_HIGHBITS:%.*]] = lshr i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X_HIGHBITS]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %mask_shr = lshr i8 -1, %y
+  %nmask = shl i8 %mask_shr, %y
+  %mask = xor i8 %nmask, -1
+  %and = and i8 %mask, %x
+  %r = icmp eq i8 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_notmask_lshr_shl_fail_mismatch_shifts(i8 %x_in, i8 %y, i8 %z) {
+; CHECK-LABEL: @src_is_notmask_lshr_shl_fail_mismatch_shifts(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[MASK_SHR:%.*]] = lshr i8 -1, [[Y:%.*]]
+; CHECK-NEXT:    [[NMASK:%.*]] = shl i8 [[MASK_SHR]], [[Z:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = xor i8 [[NMASK]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %mask_shr = lshr i8 -1, %y
+  %nmask = shl i8 %mask_shr, %z
+  %mask = xor i8 %nmask, -1
+  %and = and i8 %mask, %x
+  %r = icmp eq i8 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_notmask_ashr(i16 %x_in, i8 %y, i16 %z) {
+; CHECK-LABEL: @src_is_notmask_ashr(
+; CHECK-NEXT:    [[X:%.*]] = xor i16 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[M_IN:%.*]] = shl i8 -32, [[Y:%.*]]
+; CHECK-NEXT:    [[NMASK:%.*]] = sext i8 [[M_IN]] to i16
+; CHECK-NEXT:    [[NMASK_SHR:%.*]] = ashr i16 [[NMASK]], [[Z:%.*]]
+; CHECK-NEXT:    [[MASK:%.*]] = xor i16 [[NMASK_SHR]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X]], [[MASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[X]], [[AND]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i16 %x_in, 123
+  %m_in = shl i8 -32, %y
+  %nmask = sext i8 %m_in to i16
+  %nmask_shr = ashr i16 %nmask, %z
+  %mask = xor i16 %nmask_shr, -1
+  %and = and i16 %x, %mask
+  %r = icmp eq i16 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_notmask_neg_p2(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_notmask_neg_p2(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[NY:%.*]] = sub i8 0, [[Y:%.*]]
+; CHECK-NEXT:    [[P2:%.*]] = and i8 [[NY]], [[Y]]
+; CHECK-NEXT:    [[NMASK:%.*]] = sub i8 0, [[P2]]
+; CHECK-NEXT:    [[MASK:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[NMASK]])
+; CHECK-NEXT:    [[NOTMASK:%.*]] = xor i8 [[MASK]], -1
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %ny = sub i8 0, %y
+  %p2 = and i8 %ny, %y
+  %nmask = sub i8 0, %p2
+  %mask = call i8 @llvm.bitreverse.i8(i8 %nmask)
+  %notmask = xor i8 %mask, -1
+  %and = and i8 %notmask, %x
+  %r = icmp eq i8 0, %and
+  ret i1 %r
+}
+
+define i1 @src_is_notmask_neg_p2_fail_not_invertable(i8 %x_in, i8 %y) {
+; CHECK-LABEL: @src_is_notmask_neg_p2_fail_not_invertable(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[NY:%.*]] = sub i8 0, [[Y:%.*]]
+; CHECK-NEXT:    [[P2:%.*]] = and i8 [[NY]], [[Y]]
+; CHECK-NEXT:    [[NOTMASK:%.*]] = sub i8 0, [[P2]]
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[AND]], 0
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %ny = sub i8 0, %y
+  %p2 = and i8 %ny, %y
+  %notmask = sub i8 0, %p2
+  %and = and i8 %notmask, %x
+  %r = icmp eq i8 0, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_const_slt(i8 %x_in) {
+; CHECK-LABEL: @src_is_mask_const_slt(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], 7
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[X]], [[AND]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %and = and i8 %x, 7
+  %r = icmp slt i8 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_const_sgt(i8 %x_in) {
+; CHECK-LABEL: @src_is_mask_const_sgt(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[R:%.*]] = icmp sgt i8 [[X]], 7
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %and = and i8 %x, 7
+  %r = icmp sgt i8 %x, %and
+  ret i1 %r
+}
+
+define i1 @src_is_mask_const_sle(i8 %x_in) {
+; CHECK-LABEL: @src_is_mask_const_sle(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[X]], 31
+; CHECK-NEXT:    [[R:%.*]] = icmp sle i8 [[AND]], [[X]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %and = and i8 %x, 31
+  %r = icmp sle i8 %and, %x
+  ret i1 %r
+}
+
+define i1 @src_is_mask_const_sge(i8 %x_in) {
+; CHECK-LABEL: @src_is_mask_const_sge(
+; CHECK-NEXT:    [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
+; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[X]], 32
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %x = xor i8 %x_in, 123
+  %and = and i8 %x, 31
+  %r = icmp sge i8 %and, %x
+  ret i1 %r
+}