Index: llvm/lib/Analysis/InstructionSimplify.cpp
===================================================================
--- llvm/lib/Analysis/InstructionSimplify.cpp
+++ llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2849,6 +2849,45 @@
   return nullptr;
 }
 
+
+// If only one of the icmp's operands has NSW flags, try to prove that:
+//
+//   icmp slt (x + C1), (x +nsw C2)
+//
+// is equivalent to:
+//
+//   icmp slt C1, C2
+//
+// which is true if x + C2 has the NSW flag set and C1 < C2 && C1 >= 0.
+//
+// Proof sketch: x + C2 cannot signed-wrap (nsw), and with 0 <= C1 < C2 the
+// value x + C1 lies between x and x + C2, so it cannot signed-wrap either.
+// Both sums are then exact and comparing them equals comparing the constants.
+static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
+                                    Value *RHS, const InstrInfoQuery &IIQ) {
+  // TODO: only support icmp slt for now.
+  // Honor UseInstrInfo: when the caller asked us not to trust IR flags on
+  // the instructions being simplified, the nsw flag must be ignored.
+  if (Pred != CmpInst::ICMP_SLT || !IIQ.UseInstrInfo)
+    return false;
+
+  // Canonicalize nsw add as RHS.
+  if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
+    std::swap(LHS, RHS);
+  if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
+    return false;
+
+  Value *X;
+  const APInt *C1, *C2;
+  if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
+      !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
+    return false;
+
+  return C1->slt(*C2) && C1->isNonNegative();
+}
+
+
 /// TODO: A large part of this logic is duplicated in InstCombine's
 /// foldICmpBinOp(). We should be able to share that and avoid the code
 /// duplication.
@@ -2898,8 +2937,9 @@
     return V;
 
   // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
-  if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem &&
-      NoRHSWrapProblem) {
+  bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
+                     trySimplifyICmpWithAdds(Pred, LHS, RHS, Q.IIQ);
+  if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
     // Determine Y and Z in the form icmp (X+Y), (X+Z).
     Value *Y, *Z;
     if (A == C) {
Index: llvm/test/Transforms/InstSimplify/compare.ll
===================================================================
--- llvm/test/Transforms/InstSimplify/compare.ll
+++ llvm/test/Transforms/InstSimplify/compare.ll
@@ -1769,12 +1769,7 @@
 
 define i1 @icmp_nsw_1(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_1(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i32 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[V]], 6
-; CHECK-NEXT:    [[S1:%.*]] = sext i32 [[ADD5]] to i64
-; CHECK-NEXT:    [[S2:%.*]] = sext i32 [[ADD6]] to i64
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[S1]], [[S2]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i32 %V, 5
   %add6 = add nsw i32 %V, 6
@@ -1786,10 +1781,7 @@
 
 define i1 @icmp_nsw_2(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_2(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i32 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[V]], 6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i32 %V, 5
   %add6 = add nsw i32 %V, 6
@@ -1799,10 +1791,7 @@
 
 define i1 @icmp_nsw_commute(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_commute(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i32 5, [[V:%.*]]
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 [[V]], 6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i32 5, %V
   %add6 = add nsw i32 %V, 6
@@ -1812,10 +1801,7 @@
 
 define i1 @icmp_nsw_commute2(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_commute2(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i32 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 6, [[V]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i32 %V, 5
   %add6 = add nsw i32 6, %V
@@ -1825,10 +1811,7 @@
 
 define i1 @icmp_nsw_commute3(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_commute3(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i32 5, [[V:%.*]]
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i32 6, [[V]]
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i32 5, %V
   %add6 = add nsw i32 6, %V
@@ -1861,10 +1844,7 @@
 
 define i1 @icmp_nsw_false(i32 %V) {
 ; CHECK-LABEL: @icmp_nsw_false(
-; CHECK-NEXT:    [[ADD5:%.*]] = add nsw i32 [[V:%.*]], 6
-; CHECK-NEXT:    [[ADD6:%.*]] = add i32 [[V]], 5
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 false
 ;
   %add5 = add nsw i32 %V, 6
   %add6 = add i32 %V, 5
@@ -1910,10 +1890,7 @@
 
 define i1 @icmp_nsw_i8(i8 %V) {
 ; CHECK-LABEL: @icmp_nsw_i8(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i8 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i8 [[V]], 6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i8 %V, 5
   %add6 = add nsw i8 %V, 6
@@ -1923,10 +1900,7 @@
 
 define i1 @icmp_nsw_i16(i16 %V) {
 ; CHECK-LABEL: @icmp_nsw_i16(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i16 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i16 [[V]], 6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i16 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i16 %V, 5
   %add6 = add nsw i16 %V, 6
@@ -1936,10 +1910,7 @@
 
 define i1 @icmp_nsw_i64(i64 %V) {
 ; CHECK-LABEL: @icmp_nsw_i64(
-; CHECK-NEXT:    [[ADD5:%.*]] = add i64 [[V:%.*]], 5
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw i64 [[V]], 6
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i64 [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %add5 = add i64 %V, 5
   %add6 = add nsw i64 %V, 6
@@ -1949,10 +1920,7 @@
 
 define <4 x i1> @icmp_nsw_vec(<4 x i32> %V) {
 ; CHECK-LABEL: @icmp_nsw_vec(
-; CHECK-NEXT:    [[ADD5:%.*]] = add <4 x i32> [[V:%.*]], <i32 5, i32 5, i32 5, i32 5>
-; CHECK-NEXT:    [[ADD6:%.*]] = add nsw <4 x i32> [[V]], <i32 6, i32 6, i32 6, i32 6>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <4 x i32> [[ADD5]], [[ADD6]]
-; CHECK-NEXT:    ret <4 x i1> [[CMP]]
+; CHECK-NEXT:    ret <4 x i1> <i1 true, i1 true, i1 true, i1 true>
 ;
   %add5 = add <4 x i32> %V, <i32 5, i32 5, i32 5, i32 5>
   %add6 = add nsw <4 x i32> %V, <i32 6, i32 6, i32 6, i32 6>