Index: lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2824,6 +2824,31 @@
   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
     return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
 
+  // TODO: The subtraction-related identities shown below also hold, but
+  // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
+  // wouldn't happen even if they were implemented.
+  //
+  // icmp ult (X - 1), Y -> icmp ule X, Y
+  // icmp uge (X - 1), Y -> icmp ugt X, Y
+  // icmp ugt X, (Y - 1) -> icmp uge X, Y
+  // icmp ule X, (Y - 1) -> icmp ult X, Y
+
+  // icmp ule (X + 1), Y -> icmp ult X, Y
+  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
+    return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
+
+  // icmp ugt (X + 1), Y -> icmp uge X, Y
+  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
+    return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
+
+  // icmp uge X, (Y + 1) -> icmp ugt X, Y
+  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
+    return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
+
+  // icmp ult X, (Y + 1) -> icmp ule X, Y
+  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
+    return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
+
   // if C1 has greater magnitude than C2:
   //  icmp (X + C1), (Y + C2) -> icmp (X + C3), Y
   //  s.t. C3 = C1 - C2
Index: test/Transforms/InstCombine/icmp.ll
===================================================================
--- test/Transforms/InstCombine/icmp.ll
+++ test/Transforms/InstCombine/icmp.ll
@@ -2761,3 +2761,47 @@
   %b = icmp eq i64 %o, 0
   ret i1 %b
 }
+
+define i1 @icmp_add1_ugt(i32 %x, i32 %y) {
+; CHECK-LABEL: @icmp_add1_ugt(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp uge i32 %x, %y
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = add nuw i32 %x, 1
+  %cmp = icmp ugt i32 %add, %y
+  ret i1 %cmp
+}
+
+define i1 @icmp_add1_ule(i32 %x, i32 %y) {
+; CHECK-LABEL: @icmp_add1_ule(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 %x, %y
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %add = add nuw i32 %x, 1
+  %cmp = icmp ule i32 %add, %y
+  ret i1 %cmp
+}
+
+define i1 @cmp_uge_rhs_inc(float %x, i32 %i) {
+; CHECK-LABEL: @cmp_uge_rhs_inc(
+; CHECK-NEXT:    [[CONV:%.*]] = fptosi float %x to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[CONV]], %i
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %conv = fptosi float %x to i32
+  %inc = add nuw i32 %i, 1
+  %cmp = icmp uge i32 %conv, %inc
+  ret i1 %cmp
+}
+
+define i1 @cmp_ult_rhs_inc(float %x, i32 %i) {
+; CHECK-LABEL: @cmp_ult_rhs_inc(
+; CHECK-NEXT:    [[CONV:%.*]] = fptosi float %x to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ule i32 [[CONV]], %i
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %conv = fptosi float %x to i32
+  %inc = add nuw i32 %i, 1
+  %cmp = icmp ult i32 %conv, %inc
+  ret i1 %cmp
+}
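
Note on the TODO comment in the hunk above: the subtraction identities stay unimplemented because InstCombine canonicalizes a subtract-by-constant into an add of the negated constant, and the nuw flag cannot be kept on (X + -1). A minimal IR sketch of that effect, using a hypothetical function name that is not part of the patch:

  ; Subtraction form of the same comparison, before canonicalization.
  define i1 @icmp_sub1_ult(i32 %x, i32 %y) {
    %dec = sub nuw i32 %x, 1
    %cmp = icmp ult i32 %dec, %y
    ret i1 %cmp
  }
  ; After canonicalization %dec becomes "add i32 %x, -1" with no nuw, so the
  ; m_One()/no-wrap patterns in the folds above no longer match and the
  ; ult -> ule rewrite listed in the TODO is never attempted.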