Index: include/llvm/IR/PatternMatch.h
===================================================================
--- include/llvm/IR/PatternMatch.h
+++ include/llvm/IR/PatternMatch.h
@@ -407,6 +407,15 @@
   return cst_pred_ty();
 }
 
+struct is_lowbit_mask {
+  bool isValue(const APInt &C) { return C.isMask(); }
+};
+/// Match an integer or vector with only the low bit(s) set.
+/// For vectors, this includes constants with undefined elements.
+inline cst_pred_ty<is_lowbit_mask> m_LowBitMask() {
+  return cst_pred_ty<is_lowbit_mask>();
+}
+
 struct is_nan {
   bool isValue(const APFloat &C) { return C.isNaN(); }
 };
Index: lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4438,6 +4438,38 @@
   }
 }
 
+/// Some comparisons can be simplified.
+/// In this case, we are looking for comparisons that look like
+/// a check for a lossy truncation.
+/// Folds:
+///   x & (-1 >> y) == x    to    x u<= (-1 >> y)
+/// The Mask can be a constant, too.
+static Value *
+canonicalizeUnsignedTruncationCheckLikeICmps(ICmpInst &I,
+                                             InstCombiner::BuilderTy &Builder) {
+  ICmpInst::Predicate SrcPred;
+  Value *X, *M;
+  if (!match(&I, m_c_ICmp(SrcPred,
+                          m_c_And(m_CombineAnd(m_CombineOr(m_LShr(m_AllOnes(),
+                                                                  m_Value()),
+                                                           m_LowBitMask()),
+                                               m_Value(M)),
+                                  m_Value(X)),
+                          m_Deferred(X))))
+    return nullptr;
+
+  ICmpInst::Predicate DstPred;
+  switch (SrcPred) {
+  case ICmpInst::Predicate::ICMP_EQ:
+    DstPred = ICmpInst::Predicate::ICMP_ULE;
+    break;
+  default:
+    return nullptr;
+  }
+
+  return Builder.CreateICmp(DstPred, X, M);
+}
+
 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
   bool Changed = false;
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
@@ -4706,6 +4738,10 @@
 
     if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
       return foldICmpAddOpConst(X, Cst, I.getSwappedPredicate());
   }
+
+  if (Value *V = canonicalizeUnsignedTruncationCheckLikeICmps(I, Builder))
+    return replaceInstUsesWith(I, V);
+
   return Changed ? &I : nullptr;
 }
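As a standalone sanity check, separate from the patch itself, the snippet below exhaustively verifies the scalar form of the fold for 8-bit values: for any mask M = -1 >> y, keeping only the bits of M leaves x unchanged exactly when x u<= M. It is plain C++ with no LLVM dependencies.

#include <cassert>
#include <cstdint>

int main() {
  // M = -1 >> y is a low-bit mask (or all-ones when y == 0). The fold
  // claims ((x & M) == x) <=> (x u<= M); check every 8-bit x and y.
  for (unsigned y = 0; y < 8; ++y) {
    const uint8_t M = uint8_t(0xFFu >> y);
    for (unsigned x = 0; x <= 0xFFu; ++x)
      assert(((x & M) == x) == (x <= M));
  }
  return 0;
}

The same argument covers the constant-mask case handled by m_LowBitMask(), since every low-bit mask constant can be written as -1 >> y for some y.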
Index: test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
===================================================================
--- test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
+++ test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
@@ -15,9 +15,8 @@
 
 define i1 @p0(i8 %x) {
 ; CHECK-LABEL: @p0(
-; CHECK-NEXT:    [[TMP0:%.*]] = and i8 [[X:%.*]], 3
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP0]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[X:%.*]], 4
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %tmp0 = and i8 %x, 3
   %ret = icmp eq i8 %tmp0, %x
@@ -30,9 +29,8 @@
 
 define <2 x i1> @p1_vec_splat(<2 x i8> %x) {
 ; CHECK-LABEL: @p1_vec_splat(
-; CHECK-NEXT:    [[TMP0:%.*]] = and <2 x i8> [[X:%.*]],
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq <2 x i8> [[TMP0]], [[X]]
-; CHECK-NEXT:    ret <2 x i1> [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult <2 x i8> [[X:%.*]],
+; CHECK-NEXT:    ret <2 x i1> [[TMP1]]
 ;
   %tmp0 = and <2 x i8> %x,
   %ret = icmp eq <2 x i8> %tmp0, %x
@@ -41,9 +39,8 @@
 
 define <2 x i1> @p2_vec_nonsplat(<2 x i8> %x) {
 ; CHECK-LABEL: @p2_vec_nonsplat(
-; CHECK-NEXT:    [[TMP0:%.*]] = and <2 x i8> [[X:%.*]],
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq <2 x i8> [[TMP0]], [[X]]
-; CHECK-NEXT:    ret <2 x i1> [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult <2 x i8> [[X:%.*]],
+; CHECK-NEXT:    ret <2 x i1> [[TMP1]]
 ;
   %tmp0 = and <2 x i8> %x, ; doesn't have to be splat.
   %ret = icmp eq <2 x i8> %tmp0, %x
@@ -52,9 +49,8 @@
 
 define <3 x i1> @p3_vec_splat_undef(<3 x i8> %x) {
 ; CHECK-LABEL: @p3_vec_splat_undef(
-; CHECK-NEXT:    [[TMP0:%.*]] = and <3 x i8> [[X:%.*]],
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq <3 x i8> [[TMP0]], [[X]]
-; CHECK-NEXT:    ret <3 x i1> [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult <3 x i8> [[X:%.*]],
+; CHECK-NEXT:    ret <3 x i1> [[TMP1]]
 ;
   %tmp0 = and <3 x i8> %x,
   %ret = icmp eq <3 x i8> %tmp0, %x
@@ -70,9 +66,8 @@
 define i1 @c0() {
 ; CHECK-LABEL: @c0(
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[TMP0:%.*]] = and i8 [[X]], 3
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[X]], [[TMP0]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[X]], 4
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %x = call i8 @gen8()
   %tmp0 = and i8 %x, 3
@@ -90,8 +85,8 @@
 ; CHECK-LABEL: @oneuse0(
 ; CHECK-NEXT:    [[TMP0:%.*]] = and i8 [[X:%.*]], 3
 ; CHECK-NEXT:    call void @use8(i8 [[TMP0]])
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP0]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[X]], 4
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %tmp0 = and i8 %x, 3
   call void @use8(i8 %tmp0)
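The file above exercises the constant-mask path, i.e. the constants accepted by the new m_LowBitMask() matcher: values of the form 2^k - 1 (1, 3, 7, 15, ...). For reference, a minimal standalone sketch of that predicate follows; isLowBitMask is a hypothetical helper written only for illustration, while APInt::isMask() is what the patch actually calls (and it likewise appears to reject zero).

#include <cassert>
#include <cstdint>

// A nonzero value is a low-bit mask iff adding one clears every set bit,
// i.e. it has the shape 0b0...01...1. All-ones qualifies as well.
static bool isLowBitMask(uint64_t V) { return V != 0 && ((V + 1) & V) == 0; }

int main() {
  assert(isLowBitMask(1) && isLowBitMask(3) && isLowBitMask(0xFF));
  assert(isLowBitMask(UINT64_MAX));
  assert(!isLowBitMask(0) && !isLowBitMask(2) && !isLowBitMask(6));
  return 0;
}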
Index: test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
===================================================================
--- test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
+++ test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
@@ -15,9 +15,8 @@
 define i1 @p0(i8 %x, i8 %y) {
 ; CHECK-LABEL: @p0(
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP1]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %tmp0 = lshr i8 -1, %y
   %tmp1 = and i8 %tmp0, %x
@@ -32,9 +31,8 @@
 define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @p1_vec(
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr <2 x i8> , [[Y:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> [[TMP0]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq <2 x i8> [[TMP1]], [[X]]
-; CHECK-NEXT:    ret <2 x i1> [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge <2 x i8> [[TMP0]], [[X:%.*]]
+; CHECK-NEXT:    ret <2 x i1> [[TMP1]]
 ;
   %tmp0 = lshr <2 x i8> , %y
   %tmp1 = and <2 x i8> %tmp0, %x
@@ -45,9 +43,8 @@
 define <3 x i1> @p2_vec_undef(<3 x i8> %x, <3 x i8> %y) {
 ; CHECK-LABEL: @p2_vec_undef(
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr <3 x i8> , [[Y:%.*]]
-; CHECK-NEXT:    [[TMP1:%.*]] = and <3 x i8> [[TMP0]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
-; CHECK-NEXT:    ret <3 x i1> [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge <3 x i8> [[TMP0]], [[X:%.*]]
+; CHECK-NEXT:    ret <3 x i1> [[TMP1]]
 ;
   %tmp0 = lshr <3 x i8> , %y
   %tmp1 = and <3 x i8> %tmp0, %x
@@ -65,9 +62,8 @@
 ; CHECK-LABEL: @c0(
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X]], [[TMP0]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP1]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i8 [[X]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %tmp0 = lshr i8 -1, %y
   %x = call i8 @gen8()
@@ -80,9 +76,8 @@
 ; CHECK-LABEL: @c1(
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[TMP0]], [[X]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[X]], [[TMP1]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i8 [[X]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %tmp0 = lshr i8 -1, %y
   %x = call i8 @gen8()
@@ -95,9 +90,8 @@
 ; CHECK-LABEL: @c2(
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X]], [[TMP0]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[X]], [[TMP1]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i8 [[X]], [[TMP0]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %tmp0 = lshr i8 -1, %y
   %x = call i8 @gen8()
@@ -116,9 +110,8 @@
 ; CHECK-LABEL: @oneuse0(
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[TMP0]])
-; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP1]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %tmp0 = lshr i8 -1, %y
   call void @use8(i8 %tmp0)
@@ -132,8 +125,8 @@
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[TMP1]])
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP1]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i8 [[TMP0]], [[X]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %tmp0 = lshr i8 -1, %y
   %tmp1 = and i8 %tmp0, %x
@@ -148,8 +141,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[TMP0]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[TMP1]])
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[TMP1]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i8 [[TMP0]], [[X]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %tmp0 = lshr i8 -1, %y
   call void @use8(i8 %tmp0)
Index: test/Transforms/InstCombine/icmp-logical.ll
===================================================================
--- test/Transforms/InstCombine/icmp-logical.ll
+++ test/Transforms/InstCombine/icmp-logical.ll
@@ -88,9 +88,11 @@
 
 define i1 @masked_or_A(i32 %A) {
 ; CHECK-LABEL: @masked_or_A(
-; CHECK-NEXT:    [[MASK2:%.*]] = and i32 [[A:%.*]], 39
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[A:%.*]], 8
+; CHECK-NEXT:    [[MASK2:%.*]] = and i32 [[A]], 39
 ; CHECK-NEXT:    [[TST2:%.*]] = icmp eq i32 [[MASK2]], [[A]]
-; CHECK-NEXT:    ret i1 [[TST2]]
+; CHECK-NEXT:    [[RES:%.*]] = or i1 [[TMP1]], [[TST2]]
+; CHECK-NEXT:    ret i1 [[RES]]
 ;
   %mask1 = and i32 %A, 7
   %tst1 = icmp eq i32 %mask1, %A
Index: test/Transforms/InstCombine/icmp-mul-zext.ll
===================================================================
--- test/Transforms/InstCombine/icmp-mul-zext.ll
+++ test/Transforms/InstCombine/icmp-mul-zext.ll
@@ -11,9 +11,8 @@
 ; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[MUL]], [[SH_PROM]]
 ; CHECK-NEXT:    [[CONV2:%.*]] = zext i32 [[SHR]] to i64
 ; CHECK-NEXT:    [[MUL3:%.*]] = mul nuw nsw i64 [[CONV]], [[CONV2]]
-; CHECK-NEXT:    [[CONV6:%.*]] = and i64 [[MUL3]], 4294967295
-; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i64 [[CONV6]], [[MUL3]]
-; CHECK-NEXT:    br i1 [[TOBOOL]], label [[LOR_RHS:%.*]], label [[LOR_END:%.*]]
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ugt i64 [[MUL3]], 4294967295
+; CHECK-NEXT:    br i1 [[TMP3]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
 ; CHECK:       lor.rhs:
 ; CHECK-NEXT:    [[AND:%.*]] = and i64 [[MUL3]], [[TMP2]]
 ; CHECK-NEXT:    [[CONV4:%.*]] = trunc i64 [[AND]] to i32
@@ -21,8 +20,8 @@
 ; CHECK-NEXT:    [[PHITMP:%.*]] = zext i1 [[TOBOOL7]] to i32
 ; CHECK-NEXT:    br label [[LOR_END]]
 ; CHECK:       lor.end:
-; CHECK-NEXT:    [[TMP3:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[PHITMP]], [[LOR_RHS]] ]
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; CHECK-NEXT:    [[TMP4:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[PHITMP]], [[LOR_RHS]] ]
+; CHECK-NEXT:    ret i32 [[TMP4]]
 ;
 entry:
   %conv = zext i32 %0 to i64