diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1073,13 +1073,25 @@
     // Given  ZeroCmpOp = (A + B)
     //    ZeroCmpOp <= A && ZeroCmpOp != 0  -->  (0-B) <  A
+    //    ZeroCmpOp <  A && ZeroCmpOp != 0  -->  (0-B) <  A  iff  A/B != 0
     //    ZeroCmpOp >  A || ZeroCmpOp == 0  -->  (0-B) >= A
+    //    ZeroCmpOp >= A || ZeroCmpOp == 0  -->  (0-B) >= A  iff  A/B != 0
     if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
         IsAnd)
       return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
+    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
+        IsAnd &&
+        (isKnownNonZero(A, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT) ||
+         isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)))
+      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
     if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
         !IsAnd)
       return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
+    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
+        !IsAnd &&
+        (isKnownNonZero(A, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT) ||
+         isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)))
+      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
   }
 
   Value *Base, *Offset;
diff --git a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
--- a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
+++ b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll
@@ -31,10 +31,9 @@
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -54,10 +53,9 @@
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %offset, 0
   call void @llvm.assume(i1 %cmp)
@@ -79,9 +77,9 @@
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
 ; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
 ; CHECK-NEXT:    call void @use1(i1 [[NOT_NULL]])
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -100,11 +98,11 @@
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
 ; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
 ; CHECK-NEXT:    call void @use1(i1 [[NO_UNDERFLOW]])
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -149,10 +147,9 @@
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -170,10 +167,9 @@
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -191,10 +187,9 @@
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NO_UNDERFLOW]], [[NOT_NULL]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -214,10 +209,9 @@
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp eq i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp uge i8 [[ADJUSTED]], [[BASE]]
-; CHECK-NEXT:    [[R:%.*]] = or i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[OFFSET]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp uge i8 [[TMP1]], [[BASE]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
@@ -237,10 +231,9 @@
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT:    [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]]
 ; CHECK-NEXT:    call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT:    [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0
-; CHECK-NEXT:    [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[OFFSET]]
-; CHECK-NEXT:    [[R:%.*]] = and i1 [[NOT_NULL]], [[NO_UNDERFLOW]]
-; CHECK-NEXT:    ret i1 [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i8 0, [[BASE]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]]
+; CHECK-NEXT:    ret i1 [[TMP2]]
 ;
   %cmp = icmp slt i8 %base, 0
   call void @llvm.assume(i1 %cmp)
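
Note: as a quick way to see what the `iff A/B != 0` side condition buys, the strict-predicate equivalence can be checked exhaustively at i8 width. The standalone program below is illustrative only and not part of the patch; it assumes the known-non-zero value is B, i.e. the operand that ends up negated, and the file/identifier names are hypothetical.

// check_fold.cpp -- brute-force the strict variants of the fold over all
// i8 values, with B (the negated operand) restricted to be non-zero.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  for (unsigned a = 0; a < 256; ++a) {
    for (unsigned b = 1; b < 256; ++b) { // b != 0: the known-non-zero operand
      uint8_t A = uint8_t(a), B = uint8_t(b);
      uint8_t Sum = uint8_t(A + B);  // ZeroCmpOp = (A + B), wraps mod 256
      uint8_t NegB = uint8_t(0 - B); // (0-B)

      // ZeroCmpOp <  A && ZeroCmpOp != 0  -->  (0-B) <  A
      bool AndLHS = (Sum < A) && (Sum != 0);
      bool AndRHS = NegB < A;
      assert(AndLHS == AndRHS);

      // ZeroCmpOp >= A || ZeroCmpOp == 0  -->  (0-B) >= A
      // (the De Morgan dual of the && form above)
      bool OrLHS = (Sum >= A) || (Sum == 0);
      bool OrRHS = NegB >= A;
      assert(OrLHS == OrRHS);
    }
  }
  puts("strict-variant folds hold for all i8 values with B != 0");
  return 0;
}

Compile and run with any C++11 compiler, e.g. `clang++ check_fold.cpp && ./a.out`; removing the `b != 0` restriction makes the asserts fire, which is why the new code paths are gated on isKnownNonZero.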