diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1666,17 +1666,13 @@
     if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
       return nullptr;
   } else {
-    // For an arithmetic shift right we can do the same, if we ensure
-    // the And doesn't use any bits being shifted in. Normally these would
-    // be turned into lshr by SimplifyDemandedBits, but not if there is an
-    // additional user.
+    // For an arithmetic shift, check that both constants don't use (in a
+    // signed sense) the top bits being shifted out.
     assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
     NewCmpCst = C1.shl(*C3);
     NewAndCst = C2.shl(*C3);
-    AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
-    if (NewAndCst.lshr(*C3) != C2)
-      return nullptr;
-    if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
+    AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
+    if (NewAndCst.ashr(*C3) != C2)
       return nullptr;
   }
 
diff --git a/llvm/test/Transforms/InstCombine/icmp.ll b/llvm/test/Transforms/InstCombine/icmp.ll
--- a/llvm/test/Transforms/InstCombine/icmp.ll
+++ b/llvm/test/Transforms/InstCombine/icmp.ll
@@ -1864,9 +1864,8 @@
 
 define i1 @icmp_and_ashr_neg_and_legal(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_and_legal(
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[AND]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], -32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[TMP1]], 16
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %ashr = ashr i8 %x, 4
@@ -1891,9 +1890,8 @@
 
 define i1 @icmp_and_ashr_neg_cmp_slt_legal(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_slt_legal(
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[AND]], -4
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], -32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[TMP1]], -64
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %ashr = ashr i8 %x, 4
@@ -1918,9 +1916,8 @@
 
 define i1 @icmp_and_ashr_neg_cmp_eq_legal(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_eq_legal(
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[AND]], -4
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], -32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[TMP1]], -64
 ; CHECK-NEXT:    ret i1 [[CMP]]
 ;
   %ashr = ashr i8 %x, 4
@@ -1931,10 +1928,7 @@
 
 define i1 @icmp_and_ashr_neg_cmp_eq_shiftout(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_eq_shiftout(
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[AND]], -68
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 false
 ;
   %ashr = ashr i8 %x, 4
   %and = and i8 %ashr, -2
@@ -1944,10 +1938,7 @@
 
 define i1 @icmp_and_ashr_neg_cmp_ne_shiftout(i8 %x) {
 ; CHECK-LABEL: @icmp_and_ashr_neg_cmp_ne_shiftout(
-; CHECK-NEXT:    [[ASHR:%.*]] = ashr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[AND:%.*]] = and i8 [[ASHR]], -2
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[AND]], -68
-; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK-NEXT:    ret i1 true
 ;
   %ashr = ashr i8 %x, 4
   %and = and i8 %ashr, -2
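
To see why the switch from lshr to ashr matters, here is a minimal standalone sketch, not part of the patch: plain C++ with int8_t standing in for APInt, two's-complement arithmetic-shift semantics assumed (guaranteed since C++20, universal in practice), and roundTripsAShr a hypothetical helper written for this note. The fold rewrites icmp (and (ashr X, C3), C2), C1 as icmp (and X, C2 << C3), C1 << C3, which is sound for the mask when C2 survives a shl/ashr round trip; when C1 does not survive it, an equality compare folds to a constant, as in the two *_shiftout tests above.

#include <cstdint>
#include <cstdio>

// Mirrors the patched check: shift the constant left by the shift amount,
// arithmetic-shift it back, and test whether the value is unchanged. The
// left shift goes through uint8_t to avoid shifting a negative value.
static bool roundTripsAShr(int8_t C, unsigned ShAmt) {
  int8_t Shifted = (int8_t)(uint8_t)((uint8_t)C << ShAmt); // C.shl(ShAmt)
  return (int8_t)(Shifted >> ShAmt) == C;                  // C.ashr(ShAmt)
}

int main() {
  // @icmp_and_ashr_neg_and_legal: C2 = -2 shifts to -32 and C1 = 1 shifts
  // to 16; both survive the round trip, so the fold is legal even though
  // the mask is negative (the old lshr-based check rejected this case).
  printf("%d %d\n", roundTripsAShr(-2, 4), roundTripsAShr(1, 4)); // 1 1
  // @icmp_and_ashr_neg_cmp_eq_shiftout: C1 = -68 shifts to -64, which
  // comes back as -4 != -68, so AnyCmpCstBitsShiftedOut is true and the
  // icmp eq/ne folds straight to false/true.
  printf("%d\n", roundTripsAShr(-68, 4)); // 0
}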