diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -3394,8 +3394,26 @@
     return ConstantInt::getTrue(getCompareTy(RHS));
   }
 
-  if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
-      LBO->getOperand(1) == RBO->getOperand(1)) {
+  if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode())
+    return nullptr;
+
+  if (LBO->getOperand(0) == RBO->getOperand(0)) {
+    switch (LBO->getOpcode()) {
+    default:
+      break;
+    case Instruction::Shl:
+      bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
+      bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
+      if ((!NUW && !NSW) || (!NSW && ICmpInst::isSigned(Pred)) ||
+          !isKnownNonZero(LBO->getOperand(0), Q.DL))
+        break;
+      if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
+                                      RBO->getOperand(1), Q, MaxRecurse - 1))
+        return V;
+    }
+  }
+
+  if (LBO->getOperand(1) == RBO->getOperand(1)) {
     switch (LBO->getOpcode()) {
     default:
       break;
diff --git a/llvm/test/Transforms/InstSimplify/compare.ll b/llvm/test/Transforms/InstSimplify/compare.ll
--- a/llvm/test/Transforms/InstSimplify/compare.ll
+++ b/llvm/test/Transforms/InstSimplify/compare.ll
@@ -2812,6 +2812,143 @@
   ret i1 %res
 }
 
+
+define i1 @icmp_lshr_known_non_zero_ult_true(i8 %x) {
+; CHECK-LABEL: @icmp_lshr_known_non_zero_ult_true(
+; CHECK-NEXT:    ret i1 true
+;
+  %or = or i8 %x, 1
+  %x1 = shl nuw i8 %or, 1
+  %x2 = shl nuw i8 %or, 2
+  %cmp = icmp ult i8 %x1, %x2
+  ret i1 %cmp
+}
+
+define i1 @icmp_lshr_known_non_zero_ult_false(i8 %x) {
+; CHECK-LABEL: @icmp_lshr_known_non_zero_ult_false(
+; CHECK-NEXT:    ret i1 false
+;
+  %or = or i8 %x, 1
+  %x1 = shl nuw i8 %or, 1
+  %x2 = shl nuw i8 %or, 2
+  %cmp = icmp ugt i8 %x1, %x2
+  ret i1 %cmp
+}
+
+define i1 @icmp_lshr_known_non_zero_slt_true(i8 %x) {
+; CHECK-LABEL: @icmp_lshr_known_non_zero_slt_true(
+; CHECK-NEXT:    ret i1 true
+;
+  %or = or i8 %x, 1
+  %x1 = shl nuw nsw i8 %or, 1
+  %x2 = shl nuw nsw i8 %or, 2
+  %cmp = icmp slt i8 %x1, %x2
+  ret i1 %cmp
+}
+
+define i1 @icmp_lshr_known_non_zero_slt_false(i8 %x) {
+; CHECK-LABEL: @icmp_lshr_known_non_zero_slt_false(
+; CHECK-NEXT:    ret i1 false
+;
+  %or = or i8 %x, 1
+  %x1 = shl nuw nsw i8 %or, 2
+  %x2 = shl nuw nsw i8 %or, 1
+  %cmp = icmp slt i8 %x1, %x2
+  ret i1 %cmp
+}
+
+define i1 @neg_icmp_lshr_unknown_value(i8 %x) {
+; CHECK-LABEL: @neg_icmp_lshr_unknown_value(
+; CHECK-NEXT:    [[X1:%.*]] = shl nuw i8 [[X:%.*]], 2
+; CHECK-NEXT:    [[X2:%.*]] = shl nuw i8 [[X]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[X1]], [[X2]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %x1 = shl nuw i8 %x, 2
+  %x2 = shl nuw i8 %x, 1
+  %cmp = icmp ugt i8 %x1, %x2
+  ret i1 %cmp
+}
+
+define i1 @neg_icmp_lshr_unknown_shift(i8 %x, i8 %C1) {
+; CHECK-LABEL: @neg_icmp_lshr_unknown_shift(
+; CHECK-NEXT:    [[OR:%.*]] = or i8 [[X:%.*]], 1
+; CHECK-NEXT:    [[X1:%.*]] = shl nuw i8 [[OR]], 2
+; CHECK-NEXT:    [[X2:%.*]] = shl nuw i8 [[OR]], [[C1:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[X1]], [[X2]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %or = or i8 %x, 1
+  %x1 = shl nuw i8 %or, 2
+  %x2 = shl nuw i8 %or, %C1
+  %cmp = icmp ugt i8 %x1, %x2
+  ret i1 %cmp
+}
+
+define i1 @neg_icmp_lshr_known_non_zero_sgt_missing_nuw(i8 %x) {
+; CHECK-LABEL: @neg_icmp_lshr_known_non_zero_sgt_missing_nuw(
+; CHECK-NEXT:    [[OR:%.*]] = or i8 [[X:%.*]], 1
+; CHECK-NEXT:    [[X1:%.*]] = shl nsw i8 [[OR]], 2
+; CHECK-NEXT:    [[X2:%.*]] = shl i8 [[OR]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[X1]], [[X2]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %or = or i8 %x, 1
+  %x1 = shl nsw i8 %or, 2
+  %x2 = shl i8 %or, 1
+  %cmp = icmp sgt i8 %x1, %x2
+  ret i1 %cmp
+}
+
+define i1 @neg_icmp_lshr_known_non_zero_sgt_missing_nsw(i8 %x) {
+; CHECK-LABEL: @neg_icmp_lshr_known_non_zero_sgt_missing_nsw(
+; CHECK-NEXT:    [[OR:%.*]] = or i8 [[X:%.*]], 1
+; CHECK-NEXT:    [[X1:%.*]] = shl nuw i8 [[OR]], 2
+; CHECK-NEXT:    [[X2:%.*]] = shl i8 [[OR]], 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[X1]], [[X2]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %or = or i8 %x, 1
+  %x1 = shl nuw i8 %or, 2
+  %x2 = shl i8 %or, 1
+  %cmp = icmp sgt i8 %x1, %x2
+  ret i1 %cmp
+}
+
+define i1 @neg_icmp_lshr_known_non_zero_sgt_nuw_nsw_smallest_shift(i8 %x) {
+; CHECK-LABEL: @neg_icmp_lshr_known_non_zero_sgt_nuw_nsw_smallest_shift(
+; CHECK-NEXT:    [[OR:%.*]] = or i8 [[X:%.*]], 1
+; CHECK-NEXT:    [[X1:%.*]] = shl nuw nsw i8 [[OR]], 1
+; CHECK-NEXT:    [[X2:%.*]] = shl i8 [[OR]], 2
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[X1]], [[X2]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %or = or i8 %x, 1
+  %x1 = shl nuw nsw i8 %or, 1
+  %x2 = shl i8 %or, 2
+  %cmp = icmp sgt i8 %x1, %x2
+  ret i1 %cmp
+}
+
+define i1 @neg_icmp_lshr_different_shift_values() {
+; CHECK-LABEL: @neg_icmp_lshr_different_shift_values(
+; CHECK-NEXT:    [[VSCALE:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[VSCALE2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[VSCALEX2:%.*]] = shl nuw nsw i64 [[VSCALE]], 1
+; CHECK-NEXT:    [[VSCALEX4:%.*]] = shl nuw nsw i64 [[VSCALE2]], 2
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i64 [[VSCALEX2]], [[VSCALEX4]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %vscale = call i64 @llvm.vscale.i64()
+  %vscale2 = call i64 @llvm.vscale.i64()
+  %vscalex2 = shl nuw nsw i64 %vscale, 1
+  %vscalex4 = shl nuw nsw i64 %vscale2, 2
+  %cmp = icmp ult i64 %vscalex2, %vscalex4
+  ret i1 %cmp
+}
+
+declare i64 @llvm.vscale.i64()
+
 ; TODO: Add coverage for global aliases, link once, etc..