diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -375,6 +375,7 @@
                               bool InvertFalseVal = false);
   Value *getSelectCondition(Value *A, Value *B, bool ABIsTheSame);
 
+  Instruction *foldLShrOverflowBit(BinaryOperator &I);
   Instruction *foldExtractOfOverflowIntrinsic(ExtractValueInst &EV);
   Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
   Instruction *foldFPSignBitOps(BinaryOperator &I);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -839,6 +839,73 @@
   return nullptr;
 }
 
+// Tries to perform
+//    (lshr (add (zext X), (zext Y)), K)
+//      -> (icmp ult (add X, Y), X)
+//    where
+//      - The add's operands are zexts from a K-bit integer to a bigger type.
+//      - The add is only used by the shr, or by iK (or narrower) truncates.
+//      - The lshr type has more than 2 bits (other types are boolean math).
+//      - K > 1
+//    note that
+//      - The resulting add cannot have nuw/nsw, else on overflow we get a
+//        poison value and the transform isn't legal anymore.
+Instruction *InstCombinerImpl::foldLShrOverflowBit(BinaryOperator &I) {
+  assert(I.getOpcode() == Instruction::LShr);
+
+  Value *Add = I.getOperand(0);
+  Value *ShiftAmt = I.getOperand(1);
+  Type *Ty = I.getType();
+
+  if (Ty->getScalarSizeInBits() < 3)
+    return nullptr;
+
+  const APInt *ShAmtAPInt = nullptr;
+  Value *X = nullptr, *Y = nullptr;
+  if (!match(ShiftAmt, m_APInt(ShAmtAPInt)) ||
+      !match(Add, m_Add(m_ZExt(m_Value(X)), m_ZExt(m_Value(Y)))))
+    return nullptr;
+
+  const unsigned ShAmt = ShAmtAPInt->getZExtValue();
+  if (ShAmt == 1)
+    return nullptr;
+
+  // X/Y are zexts from `ShAmt`-sized ints.
+  if (X->getType()->getScalarSizeInBits() != ShAmt ||
+      Y->getType()->getScalarSizeInBits() != ShAmt)
+    return nullptr;
+
+  // Make sure that `Add` is only used by `I` and `ShAmt`-truncates.
+  if (!Add->hasOneUse()) {
+    for (User *U : Add->users()) {
+      if (U == &I)
+        continue;
+
+      TruncInst *Trunc = dyn_cast<TruncInst>(U);
+      if (!Trunc || Trunc->getType()->getScalarSizeInBits() > ShAmt)
+        return nullptr;
+    }
+  }
+
+  // Insert at Add so that the newly created `NarrowAdd` will dominate its
+  // users (i.e. `Add`'s users).
+  Instruction *AddInst = cast<Instruction>(Add);
+  Builder.SetInsertPoint(AddInst);
+
+  Value *NarrowAdd = Builder.CreateAdd(X, Y, "add.narrowed");
+  Value *Overflow =
+      Builder.CreateICmpULT(NarrowAdd, X, "add.narrowed.overflow");
+
+  // Replace the uses of the original add with a zext of the
+  // NarrowAdd's result. Note that all users at this stage are known to
+  // be ShAmt-sized truncs, or the lshr itself.
+  if (!Add->hasOneUse())
+    replaceInstUsesWith(*AddInst, Builder.CreateZExt(NarrowAdd, Ty));
+
+  // Replace the LShr with a zext of the overflow check.
+  return new ZExtInst(Overflow, Ty);
+}
+
 Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
   const SimplifyQuery Q = SQ.getWithInstruction(&I);
 
@@ -1327,6 +1394,9 @@
     return BinaryOperator::CreateAnd(Mask, X);
   }
 
+  if (Instruction *Overflow = foldLShrOverflowBit(I))
+    return Overflow;
+
   return nullptr;
 }
 
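For context: the fold relies on the identity that, for K-bit unsigned X and Y, bit K of the widened sum is exactly the unsigned-overflow (carry) bit of the narrow add, i.e. `((zext X) + (zext Y)) >> K == zext((X + Y) u< X)`, where `X + Y` wraps modulo 2^K. The standalone C++ sketch below checks this exhaustively for K = 8; it is illustrative only and not part of the patch.

```cpp
#include <cassert>
#include <cstdint>

// Exhaustively verifies the equivalence the fold relies on, for K = 8:
//   ((zext X) + (zext Y)) >> K  ==  zext((X + Y) u< X)
// where the right-hand add is the narrowed one and wraps mod 2^K.
int main() {
  for (uint32_t X = 0; X <= 0xFF; ++X) {
    for (uint32_t Y = 0; Y <= 0xFF; ++Y) {
      uint32_t Wide = X + Y;             // add of the two zexts
      uint8_t Narrow = (uint8_t)(X + Y); // the narrowed, wrapping add
      assert((Wide >> 8) == (Narrow < X ? 1u : 0u));
    }
  }
  return 0;
}
```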
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -1045,10 +1045,9 @@
 
 define i4 @not_bool_add_lshr(i2 %a, i2 %b) {
 ; CHECK-LABEL: @not_bool_add_lshr(
-; CHECK-NEXT:    [[ZEXT_A:%.*]] = zext i2 [[A:%.*]] to i4
-; CHECK-NEXT:    [[ZEXT_B:%.*]] = zext i2 [[B:%.*]] to i4
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i4 [[ZEXT_A]], [[ZEXT_B]]
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i4 [[ADD]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i2 [[A:%.*]], -1
+; CHECK-NEXT:    [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i2 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i4
 ; CHECK-NEXT:    ret i4 [[LSHR]]
 ;
   %zext.a = zext i2 %a to i4
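The multi-use case (the add also feeding iK-or-narrower truncates) is what lets this fire on lowered wide arithmetic. A hedged C++ illustration of the source pattern behind the `lshr_32_add_zext_trunc` test below; the function name `add_with_carry` is just for illustration:

```cpp
#include <cstdint>

// A 32-bit "low word plus carry" computed through a 64-bit add; this is the
// shape of @lshr_32_add_zext_trunc below. With the fold, both the truncated
// low word and the carry reuse one narrow 32-bit add: t = a + b; t + (t < a).
uint32_t add_with_carry(uint32_t a, uint32_t b) {
  uint64_t wide = (uint64_t)a + (uint64_t)b; // add of two zexts
  uint32_t lo = (uint32_t)wide;              // trunc of the wide add
  uint32_t carry = (uint32_t)(wide >> 32);   // lshr 32: the overflow bit
  return lo + carry;
}
```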
diff --git a/llvm/test/Transforms/InstCombine/shift-add.ll b/llvm/test/Transforms/InstCombine/shift-add.ll
--- a/llvm/test/Transforms/InstCombine/shift-add.ll
+++ b/llvm/test/Transforms/InstCombine/shift-add.ll
@@ -431,3 +431,271 @@
   %r = shl nsw i4 2, %a
   ret i4 %r
 }
+
+define i2 @lshr_2_add_zext_basic(i1 %a, i1 %b) {
+; CHECK-LABEL: @lshr_2_add_zext_basic(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i1 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[TMP1]] to i2
+; CHECK-NEXT:    ret i2 [[LSHR]]
+;
+  %zext.a = zext i1 %a to i2
+  %zext.b = zext i1 %b to i2
+  %add = add i2 %zext.a, %zext.b
+  %lshr = lshr i2 %add, 1
+  ret i2 %lshr
+}
+
+define i2 @ashr_2_add_zext_basic(i1 %a, i1 %b) {
+; CHECK-LABEL: @ashr_2_add_zext_basic(
+; CHECK-NEXT:    [[ZEXT_A:%.*]] = zext i1 [[A:%.*]] to i2
+; CHECK-NEXT:    [[ZEXT_B:%.*]] = zext i1 [[B:%.*]] to i2
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw i2 [[ZEXT_A]], [[ZEXT_B]]
+; CHECK-NEXT:    [[LSHR:%.*]] = ashr i2 [[ADD]], 1
+; CHECK-NEXT:    ret i2 [[LSHR]]
+;
+  %zext.a = zext i1 %a to i2
+  %zext.b = zext i1 %b to i2
+  %add = add i2 %zext.a, %zext.b
+  %lshr = ashr i2 %add, 1
+  ret i2 %lshr
+}
+
+define i32 @lshr_16_add_zext_basic(i16 %a, i16 %b) {
+; CHECK-LABEL: @lshr_16_add_zext_basic(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
+; CHECK-NEXT:    [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i32
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %zext.a = zext i16 %a to i32
+  %zext.b = zext i16 %b to i32
+  %add = add i32 %zext.a, %zext.b
+  %lshr = lshr i32 %add, 16
+  ret i32 %lshr
+}
+
+define i32 @lshr_16_add_known_16_leading_zeroes(i32 %a, i32 %b) {
+; CHECK-LABEL: @lshr_16_add_known_16_leading_zeroes(
+; CHECK-NEXT:    [[A16:%.*]] = and i32 [[A:%.*]], 65535
+; CHECK-NEXT:    [[B16:%.*]] = and i32 [[B:%.*]], 65535
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[A16]], [[B16]]
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[ADD]], 16
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %a16 = and i32 %a, 65535 ; 0xFFFF
+  %b16 = and i32 %b, 65535 ; 0xFFFF
+  %add = add i32 %a16, %b16
+  %lshr = lshr i32 %add, 16
+  ret i32 %lshr
+}
+
+define i32 @lshr_16_add_not_known_16_leading_zeroes(i32 %a, i32 %b) {
+; CHECK-LABEL: @lshr_16_add_not_known_16_leading_zeroes(
+; CHECK-NEXT:    [[A16:%.*]] = and i32 [[A:%.*]], 131071
+; CHECK-NEXT:    [[B16:%.*]] = and i32 [[B:%.*]], 65535
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[A16]], [[B16]]
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[ADD]], 16
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %a16 = and i32 %a, 131071 ; 0x1FFFF
+  %b16 = and i32 %b, 65535 ; 0xFFFF
+  %add = add i32 %a16, %b16
+  %lshr = lshr i32 %add, 16
+  ret i32 %lshr
+}
+
+define i64 @lshr_32_add_zext_basic(i32 %a, i32 %b) {
+; CHECK-LABEL: @lshr_32_add_zext_basic(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT:    [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
+; CHECK-NEXT:    ret i64 [[LSHR]]
+;
+  %zext.a = zext i32 %a to i64
+  %zext.b = zext i32 %b to i64
+  %add = add i64 %zext.a, %zext.b
+  %lshr = lshr i64 %add, 32
+  ret i64 %lshr
+}
+
+define i64 @lshr_31_i32_add_zext_basic(i32 %a, i32 %b) {
+; CHECK-LABEL: @lshr_31_i32_add_zext_basic(
+; CHECK-NEXT:    [[ZEXT_A:%.*]] = zext i32 [[A:%.*]] to i64
+; CHECK-NEXT:    [[ZEXT_B:%.*]] = zext i32 [[B:%.*]] to i64
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i64 [[ZEXT_A]], [[ZEXT_B]]
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i64 [[ADD]], 31
+; CHECK-NEXT:    ret i64 [[LSHR]]
+;
+  %zext.a = zext i32 %a to i64
+  %zext.b = zext i32 %b to i64
+  %add = add i64 %zext.a, %zext.b
+  %lshr = lshr i64 %add, 31
+  ret i64 %lshr
+}
+
+define i64 @lshr_33_i32_add_zext_basic(i32 %a, i32 %b) {
+; CHECK-LABEL: @lshr_33_i32_add_zext_basic(
+; CHECK-NEXT:    ret i64 0
+;
+  %zext.a = zext i32 %a to i64
+  %zext.b = zext i32 %b to i64
+  %add = add i64 %zext.a, %zext.b
+  %lshr = lshr i64 %add, 33
+  ret i64 %lshr
+}
+
+define i64 @lshr_16_to_64_add_zext_basic(i16 %a, i16 %b) {
+; CHECK-LABEL: @lshr_16_to_64_add_zext_basic(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
+; CHECK-NEXT:    [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
+; CHECK-NEXT:    ret i64 [[LSHR]]
+;
+  %zext.a = zext i16 %a to i64
+  %zext.b = zext i16 %b to i64
+  %add = add i64 %zext.a, %zext.b
+  %lshr = lshr i64 %add, 16
+  ret i64 %lshr
+}
+
+define i64 @lshr_32_add_known_32_leading_zeroes(i64 %a, i64 %b) {
+; CHECK-LABEL: @lshr_32_add_known_32_leading_zeroes(
+; CHECK-NEXT:    [[A32:%.*]] = and i64 [[A:%.*]], 4294967295
+; CHECK-NEXT:    [[B32:%.*]] = and i64 [[B:%.*]], 4294967295
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i64 [[A32]], [[B32]]
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i64 [[ADD]], 32
+; CHECK-NEXT:    ret i64 [[LSHR]]
+;
+  %a32 = and i64 %a, 4294967295 ; 0xFFFFFFFF
+  %b32 = and i64 %b, 4294967295 ; 0xFFFFFFFF
+  %add = add i64 %a32, %b32
+  %lshr = lshr i64 %add, 32
+  ret i64 %lshr
+}
+
+define i64 @lshr_32_add_not_known_32_leading_zeroes(i64 %a, i64 %b) {
+;
+; CHECK-LABEL: @lshr_32_add_not_known_32_leading_zeroes(
+; CHECK-NEXT:    [[A32:%.*]] = and i64 [[A:%.*]], 8589934591
+; CHECK-NEXT:    [[B32:%.*]] = and i64 [[B:%.*]], 4294967295
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i64 [[A32]], [[B32]]
+; CHECK-NEXT:    [[LSHR:%.*]] = lshr i64 [[ADD]], 32
+; CHECK-NEXT:    ret i64 [[LSHR]]
+;
+  %a32 = and i64 %a, 8589934591 ; 0x1FFFFFFFF
+  %b32 = and i64 %b, 4294967295 ; 0xFFFFFFFF
+  %add = add i64 %a32, %b32
+  %lshr = lshr i64 %add, 32
+  ret i64 %lshr
+}
+
+define i32 @ashr_16_add_zext_basic(i16 %a, i16 %b) {
+; CHECK-LABEL: @ashr_16_add_zext_basic(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
+; CHECK-NEXT:    [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i32
+; CHECK-NEXT:    ret i32 [[LSHR]]
+;
+  %zext.a = zext i16 %a to i32
+  %zext.b = zext i16 %b to i32
+  %add = add i32 %zext.a, %zext.b
+  %lshr = ashr i32 %add, 16
+  ret i32 %lshr
+}
+
+define i64 @ashr_32_add_zext_basic(i32 %a, i32 %b) {
+; CHECK-LABEL: @ashr_32_add_zext_basic(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
+; CHECK-NEXT:    [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
+; CHECK-NEXT:    ret i64 [[LSHR]]
+;
+  %zext.a = zext i32 %a to i64
+  %zext.b = zext i32 %b to i64
+  %add = add i64 %zext.a, %zext.b
+  %lshr = ashr i64 %add, 32
+  ret i64 %lshr
+}
+
+define i64 @ashr_16_to_64_add_zext_basic(i16 %a, i16 %b) {
+; CHECK-LABEL: @ashr_16_to_64_add_zext_basic(
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
+; CHECK-NEXT:    [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
+; CHECK-NEXT:    ret i64 [[LSHR]]
+;
+  %zext.a = zext i16 %a to i64
+  %zext.b = zext i16 %b to i64
+  %add = add i64 %zext.a, %zext.b
+  %lshr = ashr i64 %add, 16
+  ret i64 %lshr
+}
+
+define i32 @lshr_32_add_zext_trunc(i32 %a, i32 %b) {
+; CHECK-LABEL: @lshr_32_add_zext_trunc(
+; CHECK-NEXT:    [[ADD_NARROWED:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i32 [[ADD_NARROWED]], [[A]]
+; CHECK-NEXT:    [[TRUNC_SHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i32
+; CHECK-NEXT:    [[RET:%.*]] = add i32 [[ADD_NARROWED]], [[TRUNC_SHR]]
+; CHECK-NEXT:    ret i32 [[RET]]
+;
+  %zext.a = zext i32 %a to i64
+  %zext.b = zext i32 %b to i64
+  %add = add i64 %zext.a, %zext.b
+  %trunc.add = trunc i64 %add to i32
+  %shr = lshr i64 %add, 32
+  %trunc.shr = trunc i64 %shr to i32
+  %ret = add i32 %trunc.add, %trunc.shr
+  ret i32 %ret
+}
+
+define <3 x i32> @add3_i96(<3 x i32> %0, <3 x i32> %1) {
+; CHECK-LABEL: @add3_i96(
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <3 x i32> [[TMP0:%.*]], i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <3 x i32> [[TMP1:%.*]], i64 0
+; CHECK-NEXT:    [[ADD_NARROWED:%.*]] = add i32 [[TMP4]], [[TMP3]]
+; CHECK-NEXT:    [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i32 [[ADD_NARROWED]], [[TMP4]]
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <3 x i32> [[TMP0]], i64 1
+; CHECK-NEXT:    [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <3 x i32> [[TMP1]], i64 1
+; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = add nuw nsw i64 [[TMP8]], [[TMP6]]
+; CHECK-NEXT:    [[TMP10:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add nuw nsw i64 [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <3 x i32> [[TMP0]], i64 2
+; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <3 x i32> [[TMP1]], i64 2
+; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP13]], [[TMP12]]
+; CHECK-NEXT:    [[TMP15:%.*]] = lshr i64 [[TMP11]], 32
+; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP14]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <3 x i32> undef, i32 [[ADD_NARROWED]], i64 0
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP11]] to i32
+; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <3 x i32> [[TMP18]], i32 [[TMP19]], i64 1
+; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <3 x i32> [[TMP20]], i32 [[TMP17]], i64 2
+; CHECK-NEXT:    ret <3 x i32> [[TMP21]]
+;
+  %3 = extractelement <3 x i32> %0, i64 0
+  %4 = zext i32 %3 to i64
+  %5 = extractelement <3 x i32> %1, i64 0
+  %6 = zext i32 %5 to i64
+  %7 = add nuw nsw i64 %6, %4
+  %8 = extractelement <3 x i32> %0, i64 1
+  %9 = zext i32 %8 to i64
+  %10 = extractelement <3 x i32> %1, i64 1
+  %11 = zext i32 %10 to i64
+  %12 = add nuw nsw i64 %11, %9
+  %13 = lshr i64 %7, 32
+  %14 = add nuw nsw i64 %12, %13
+  %15 = extractelement <3 x i32> %0, i64 2
+  %16 = extractelement <3 x i32> %1, i64 2
+  %17 = add i32 %16, %15
+  %18 = lshr i64 %14, 32
+  %19 = trunc i64 %18 to i32
+  %20 = add i32 %17, %19
+  %21 = trunc i64 %7 to i32
+  %22 = insertelement <3 x i32> undef, i32 %21, i32 0
+  %23 = trunc i64 %14 to i32
+  %24 = insertelement <3 x i32> %22, i32 %23, i32 1
+  %25 = insertelement <3 x i32> %24, i32 %20, i32 2
+  ret <3 x i32> %25
+}
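The @add3_i96 test corresponds to a three-limb (96-bit) addition in which each carry is produced from a 64-bit add's high word; after the fold, the first carry becomes a plain 32-bit compare instead of keeping the wide add alive. A sketch of the equivalent C++, where the function name and array signature are illustrative only:

```cpp
#include <cstdint>

// Three-limb 96-bit addition, the pattern behind @add3_i96: each carry is the
// high word of a 64-bit add of zexts. The fold turns the first carry into an
// `icmp ult` on the narrow 32-bit sum.
void add3_i96(const uint32_t a[3], const uint32_t b[3], uint32_t out[3]) {
  uint64_t s0 = (uint64_t)a[0] + b[0];              // limb 0, widened
  uint64_t s1 = (uint64_t)a[1] + b[1] + (s0 >> 32); // limb 1 plus carry 0
  out[0] = (uint32_t)s0;
  out[1] = (uint32_t)s1;
  out[2] = a[2] + b[2] + (uint32_t)(s1 >> 32);      // top limb wraps
}
```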