Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2886,6 +2886,7 @@
 ///   icmp SrcPred (x & Mask), x    to    icmp DstPred x, Mask
 /// Where Mask is some pattern that produces all-ones in low bits:
 ///    (-1 >> y)
+///    ((-1 << y) >> y)     <- non-canonical, has extra uses
 ///   ~(-1 << y)
 ///    ((1 << y) + (-1))    <- non-canonical, has extra uses
 /// The Mask can be a constant, too.
@@ -2894,11 +2895,12 @@
 static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I,
                                           InstCombiner::BuilderTy &Builder) {
   ICmpInst::Predicate SrcPred;
-  Value *X, *M;
-  auto m_VariableMask =
-      m_CombineOr(m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())),
-                              m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())),
-                  m_LShr(m_AllOnes(), m_Value()));
+  Value *X, *M, *Y;
+  auto m_VariableMask = m_CombineOr(
+      m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())),
+                  m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())),
+      m_CombineOr(m_LShr(m_AllOnes(), m_Value()),
+                  m_LShr(m_Shl(m_AllOnes(), m_Value(Y)), m_Deferred(Y))));
   auto m_Mask = m_CombineOr(m_VariableMask, m_LowBitMask());
   if (!match(&I, m_c_ICmp(SrcPred,
                           m_c_And(m_CombineAnd(m_Mask, m_Value(M)),
                                   m_Value(X)),
Index: llvm/trunk/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
+++ llvm/trunk/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
@@ -23,9 +23,8 @@
 ; CHECK-NEXT:    [[T0:%.*]] = shl i8 -1, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -44,9 +43,8 @@
 ; CHECK-NEXT:    [[T0:%.*]] = shl <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use2i8(<2 x i8> [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr <2 x i8> [[T0]], [[Y]]
-; CHECK-NEXT:    [[T2:%.*]] = and <2 x i8> [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq <2 x i8> [[T2]], [[X]]
-; CHECK-NEXT:    ret <2 x i1> [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge <2 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT:    ret <2 x i1> [[TMP1]]
 ;
   %t0 = shl <2 x i8> <i8 -1, i8 -1>, %y
   call void @use2i8(<2 x i8> %t0)
@@ -61,9 +59,8 @@
 ; CHECK-NEXT:    [[T0:%.*]] = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use3i8(<3 x i8> [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr <3 x i8> [[T0]], [[Y]]
-; CHECK-NEXT:    [[T2:%.*]] = and <3 x i8> [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq <3 x i8> [[T2]], [[X]]
-; CHECK-NEXT:    ret <3 x i1> [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge <3 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT:    ret <3 x i1> [[TMP1]]
 ;
   %t0 = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, %y
   call void @use3i8(<3 x i8> %t0)
@@ -85,9 +82,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[X]], [[T1]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i8 [[X]], [[T1]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -104,9 +100,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[X]], [[T2]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i8 [[X]], [[T1]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -123,9 +118,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[X]], [[T1]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[X]], [[T2]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i8 [[X]], [[T1]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -146,9 +140,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT:    call void @use8(i8 [[T1]])
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0) ; needed anyway
@@ -166,8 +159,8 @@
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[T2]])
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0) ; needed anyway
@@ -186,8 +179,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[T1]])
 ; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[T2]])
-; CHECK-NEXT:    [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
Index: llvm/trunk/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
+++ llvm/trunk/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
@@ -23,9 +23,8 @@
 ; CHECK-NEXT:    [[T0:%.*]] = shl i8 -1, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -44,9 +43,8 @@
 ; CHECK-NEXT:    [[T0:%.*]] = shl <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use2i8(<2 x i8> [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr <2 x i8> [[T0]], [[Y]]
-; CHECK-NEXT:    [[T2:%.*]] = and <2 x i8> [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne <2 x i8> [[T2]], [[X]]
-; CHECK-NEXT:    ret <2 x i1> [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult <2 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT:    ret <2 x i1> [[TMP1]]
 ;
   %t0 = shl <2 x i8> <i8 -1, i8 -1>, %y
   call void @use2i8(<2 x i8> %t0)
@@ -61,9 +59,8 @@
 ; CHECK-NEXT:    [[T0:%.*]] = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT:    call void @use3i8(<3 x i8> [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr <3 x i8> [[T0]], [[Y]]
-; CHECK-NEXT:    [[T2:%.*]] = and <3 x i8> [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne <3 x i8> [[T2]], [[X]]
-; CHECK-NEXT:    ret <3 x i1> [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult <3 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT:    ret <3 x i1> [[TMP1]]
 ;
   %t0 = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, %y
   call void @use3i8(<3 x i8> %t0)
@@ -85,9 +82,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[X]], [[T1]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[X]], [[T1]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -104,9 +100,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[X]], [[T2]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[X]], [[T1]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -123,9 +118,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT:    [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[X]], [[T1]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[X]], [[T2]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt i8 [[X]], [[T1]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -146,9 +140,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[T0]])
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT:    call void @use8(i8 [[T1]])
-; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0) ; needed anyway
@@ -166,8 +159,8 @@
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[T2]])
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0) ; needed anyway
@@ -186,8 +179,8 @@
 ; CHECK-NEXT:    call void @use8(i8 [[T1]])
 ; CHECK-NEXT:    [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[T2]])
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
-; CHECK-NEXT:    ret i1 [[RET]]
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT:    ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
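
Rationale for the fold (an illustrative sketch, not part of the change itself): for any in-range shift amount y, the non-canonical mask ((-1 << y) >> y), with both shifts logical, produces a value whose low bits are all ones, and for such a mask (x & Mask) == x holds exactly when x u<= Mask (dually, the ne form holds exactly when x u> Mask). A minimal standalone C++ harness that brute-forces this equivalence at i8 width:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned y = 0; y < 8; ++y) {
        // The non-canonical mask the new matcher recognizes, computed in
        // 8 bits: shl of all-ones, then lshr by the same shift amount
        // (the role played by m_Deferred(Y) in the matcher above).
        uint8_t Mask = (uint8_t)((uint8_t)(0xFFu << y) >> y);
        for (unsigned x = 0; x <= 0xFF; ++x) {
          bool Eq = ((x & Mask) == x); // icmp eq (and %x, %mask), %x
          bool Ule = (x <= Mask);      // icmp ule %x, %mask
          assert(Eq == Ule);           // the two forms agree for every x, y
        }
      }
    }

The same equivalence applies element-wise in the vector tests. Note that the tests deliberately keep %t0 and %t1 alive through the @use8/@use2i8/@use3i8 calls, matching the "has extra uses" caveat in the comment: the fold replaces only the and + icmp pair with a single icmp, so it is profitable even when the shift instructions themselves cannot be removed.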