diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2630,6 +2630,36 @@
     }
   }
 
+  // Match naive pattern (and its inverted form) for checking if two values
+  // share the same sign. An example of the pattern:
+  // (icmp slt (X & Y), 0) | (icmp sgt (X | Y), -1) -> (icmp sgt (X ^ Y), -1)
+  // Inverted form (example):
+  // (icmp slt (X | Y), 0) & (icmp sgt (X & Y), -1) -> (icmp slt (X ^ Y), 0)
+  bool TrueIfSignedL, TrueIfSignedR;
+  if (InstCombiner::isSignBitCheck(PredL, *LHSC, TrueIfSignedL) &&
+      InstCombiner::isSignBitCheck(PredR, *RHSC, TrueIfSignedR) &&
+      RHS->hasOneUse() && LHS->hasOneUse()) {
+    Value *X, *Y;
+    if (((TrueIfSignedL && !TrueIfSignedR &&
+          (IsAnd ? match(LHS0, m_Or(m_Value(X), m_Value(Y)))
+                 : match(LHS0, m_And(m_Value(X), m_Value(Y)))) &&
+          (IsAnd ? match(RHS0, m_c_And(m_Specific(X), m_Specific(Y)))
+                 : match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y))))) ||
+         (!TrueIfSignedL && TrueIfSignedR &&
+          (IsAnd ? match(LHS0, m_And(m_Value(X), m_Value(Y)))
+                 : match(LHS0, m_Or(m_Value(X), m_Value(Y)))) &&
+          (IsAnd ? match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y)))
+                 : match(RHS0, m_c_And(m_Specific(X), m_Specific(Y)))))) &&
+        LHS0->hasOneUse() && RHS0->hasOneUse()) {
+      Value *NewXor = Builder.CreateXor(X, Y);
+      Value *NewC = IsAnd ? ConstantInt::getNullValue(NewXor->getType())
+                          : ConstantInt::getAllOnesValue(NewXor->getType());
+      ICmpInst::Predicate NewPred =
+          IsAnd ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGT;
+      return Builder.CreateICmp(NewPred, NewXor, NewC);
+    }
+  }
+
   return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd);
 }
 
diff --git a/llvm/test/Transforms/InstCombine/and-or-icmps.ll b/llvm/test/Transforms/InstCombine/and-or-icmps.ll
--- a/llvm/test/Transforms/InstCombine/and-or-icmps.ll
+++ b/llvm/test/Transforms/InstCombine/and-or-icmps.ll
@@ -2,6 +2,7 @@
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 
 declare void @use(i1)
+declare void @use32(i32)
 
 define i1 @PR1817_1(i32 %X) {
 ; CHECK-LABEL: @PR1817_1(
@@ -2098,3 +2099,219 @@
   %and2 = and i1 %and1, %c2
   ret i1 %and2
 }
+
+define i1 @samesign(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], -1
+; CHECK-NEXT:    ret i1 [[TMP2]]
+  %a = and i32 %x, %y
+  %lt = icmp slt i32 %a, 0
+  %o = or i32 %x, %y
+  %gt = icmp sgt i32 %o, -1
+  %r = or i1 %lt, %gt
+  ret i1 %r
+}
+
+define i1 @samesign_commute1(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_commute1(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], -1
+; CHECK-NEXT:    ret i1 [[TMP2]]
+  %a = and i32 %x, %y
+  %lt = icmp slt i32 %a, 0
+  %o = or i32 %x, %y
+  %gt = icmp sgt i32 %o, -1
+  %r = or i1 %gt, %lt ; compares swapped
+  ret i1 %r
+}
+
+define i1 @samesign_commute2(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_commute2(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], -1
+; CHECK-NEXT:    ret i1 [[TMP2]]
+  %a = and i32 %x, %y
+  %lt = icmp slt i32 %a, 0
+  %o = or i32 %y, %x ; inputs commuted
+  %gt = icmp sgt i32 %o, -1
+  %r = or i1 %lt, %gt
+  ret i1 %r
+}
+
+define i1 @samesign_commute3(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_commute3(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], -1
+; CHECK-NEXT:    ret i1 [[TMP2]]
+  %a = and i32 %x, %y
+  %lt = icmp slt i32 %a, 0
+  %o = or i32 %y, %x ; inputs commuted
+  %gt = icmp sgt i32 %o, -1
+  %r = or i1 %gt, %lt ; compares swapped
+  ret i1 %r
+}
+
+define i1 @samesign_mult_use(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_mult_use(
+; CHECK-NEXT:    [[a:%.*]] = and i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    call void @use32(i32 [[a]])
+; CHECK-NEXT:    [[lt:%.*]] = icmp slt i32 [[a]], 0
+; CHECK-NEXT:    [[o:%.*]] = or i32 [[x]], [[y]]
+; CHECK-NEXT:    call void @use32(i32 [[o]])
+; CHECK-NEXT:    [[gt:%.*]] = icmp sgt i32 [[o]], -1
+; CHECK-NEXT:    [[r:%.*]] = and i1 [[lt]], [[gt]]
+; CHECK-NEXT:    ret i1 [[r]]
+  %a = and i32 %x, %y
+  call void @use32(i32 %a)
+  %lt = icmp slt i32 %a, 0
+  %o = or i32 %x, %y
+  call void @use32(i32 %o)
+  %gt = icmp sgt i32 %o, -1
+  %r = and i1 %lt, %gt
+  ret i1 %r
+}
+
+define i1 @samesign_mult_use2(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_mult_use2(
+; CHECK-NEXT:    [[a:%.*]] = and i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[lt:%.*]] = icmp slt i32 [[a]], 0
+; CHECK-NEXT:    call void @use(i1 [[lt]])
+; CHECK-NEXT:    [[o:%.*]] = or i32 [[x]], [[y]]
+; CHECK-NEXT:    [[gt:%.*]] = icmp sgt i32 [[o]], -1
+; CHECK-NEXT:    call void @use(i1 [[gt]])
+; CHECK-NEXT:    [[r:%.*]] = and i1 [[lt]], [[gt]]
+; CHECK-NEXT:    ret i1 [[r]]
+  %a = and i32 %x, %y
+  %lt = icmp slt i32 %a, 0
+  call void @use(i1 %lt)
+  %o = or i32 %x, %y
+  %gt = icmp sgt i32 %o, -1
+  call void @use(i1 %gt)
+  %r = and i1 %lt, %gt
+  ret i1 %r
+}
+
+define i1 @samesign_wrong_cmp(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_wrong_cmp(
+; CHECK-NEXT:    [[a:%.*]] = and i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[lt:%.*]] = icmp slt i32 [[a]], 1
+; CHECK-NEXT:    [[o:%.*]] = or i32 [[x]], [[y]]
+; CHECK-NEXT:    [[gt:%.*]] = icmp sgt i32 [[o]], -1
+; CHECK-NEXT:    [[r:%.*]] = and i1 [[lt]], [[gt]]
+; CHECK-NEXT:    ret i1 [[r]]
+  %a = and i32 %x, %y
+  %lt = icmp slt i32 %a, 1 ; not a sign-bit test
+  %o = or i32 %x, %y
+  %gt = icmp sgt i32 %o, -1
+  %r = and i1 %lt, %gt
+  ret i1 %r
+}
+
+define i1 @samesign_inverted(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_inverted(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
+; CHECK-NEXT:    ret i1 [[TMP2]]
+  %a = and i32 %x, %y
+  %gt = icmp sgt i32 %a, -1
+  %o = or i32 %x, %y
+  %lt = icmp slt i32 %o, 0
+  %r = and i1 %gt, %lt
+  ret i1 %r
+}
+
+define i1 @samesign_inverted_commute1(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_inverted_commute1(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
+; CHECK-NEXT:    ret i1 [[TMP2]]
+  %a = and i32 %x, %y
+  %gt = icmp sgt i32 %a, -1
+  %o = or i32 %x, %y
+  %lt = icmp slt i32 %o, 0
+  %r = and i1 %lt, %gt ; compares swapped
+  ret i1 %r
+}
+
+define i1 @samesign_inverted_commute2(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_inverted_commute2(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
+; CHECK-NEXT:    ret i1 [[TMP2]]
+  %a = and i32 %x, %y
+  %gt = icmp sgt i32 %a, -1
+  %o = or i32 %y, %x ; source values are commuted
+  %lt = icmp slt i32 %o, 0
+  %r = and i1 %gt, %lt
+  ret i1 %r
+}
+
+define i1 @samesign_inverted_commute3(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_inverted_commute3(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
+; CHECK-NEXT:    ret i1 [[TMP2]]
+  %a = and i32 %x, %y
+  %gt = icmp sgt i32 %a, -1
+  %o = or i32 %y, %x ; source values commuted
+  %lt = icmp slt i32 %o, 0
+  %r = and i1 %lt, %gt ; compares swapped
+  ret i1 %r
+}
+
+define i1 @samesign_inverted_mult_use(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_inverted_mult_use(
+; CHECK-NEXT:    [[a:%.*]] = and i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    call void @use32(i32 [[a]])
+; CHECK-NEXT:    [[gt:%.*]] = icmp sgt i32 [[a]], -1
+; CHECK-NEXT:    [[o:%.*]] = or i32 [[x]], [[y]]
+; CHECK-NEXT:    call void @use32(i32 [[o]])
+; CHECK-NEXT:    [[lt:%.*]] = icmp slt i32 [[o]], 0
+; CHECK-NEXT:    [[r:%.*]] = and i1 [[gt]], [[lt]]
+; CHECK-NEXT:    ret i1 [[r]]
+  %a = and i32 %x, %y
+  call void @use32(i32 %a)
+  %gt = icmp sgt i32 %a, -1
+  %o = or i32 %x, %y
+  call void @use32(i32 %o)
+  %lt = icmp slt i32 %o, 0
+  %r = and i1 %gt, %lt
+  ret i1 %r
+}
+
+define i1 @samesign_inverted_mult_use2(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_inverted_mult_use2(
+; CHECK-NEXT:    [[a:%.*]] = and i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[gt:%.*]] = icmp sgt i32 [[a]], -1
+; CHECK-NEXT:    call void @use(i1 [[gt]])
+; CHECK-NEXT:    [[o:%.*]] = or i32 [[x]], [[y]]
+; CHECK-NEXT:    [[lt:%.*]] = icmp slt i32 [[o]], 0
+; CHECK-NEXT:    call void @use(i1 [[lt]])
+; CHECK-NEXT:    [[r:%.*]] = and i1 [[gt]], [[lt]]
+; CHECK-NEXT:    ret i1 [[r]]
+  %a = and i32 %x, %y
+  %gt = icmp sgt i32 %a, -1
+  call void @use(i1 %gt)
+  %o = or i32 %x, %y
+  %lt = icmp slt i32 %o, 0
+  call void @use(i1 %lt)
+  %r = and i1 %gt, %lt
+  ret i1 %r
+}
+
+define i1 @samesign_inverted_wrong_cmp(i32 %x, i32 %y) {
+; CHECK-LABEL: @samesign_inverted_wrong_cmp(
+; CHECK-NEXT:    [[a:%.*]] = and i32 [[x:%.*]], [[y:%.*]]
+; CHECK-NEXT:    [[gt:%.*]] = icmp sgt i32 [[a]], 0
+; CHECK-NEXT:    [[o:%.*]] = or i32 [[x]], [[y]]
+; CHECK-NEXT:    [[lt:%.*]] = icmp slt i32 [[o]], 0
+; CHECK-NEXT:    [[r:%.*]] = and i1 [[gt]], [[lt]]
+; CHECK-NEXT:    ret i1 [[r]]
+  %a = and i32 %x, %y
+  %gt = icmp sgt i32 %a, 0 ; not a sign-bit test
+  %o = or i32 %x, %y
+  %lt = icmp slt i32 %o, 0
+  %r = and i1 %gt, %lt
+  ret i1 %r
+}