diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -367,6 +367,7 @@
   Value *matchSelectFromAndOr(Value *A, Value *B, Value *C, Value *D);
   Value *getSelectCondition(Value *A, Value *B);

+  Instruction *foldLShrToOverflow(BinaryOperator &I);
   Instruction *foldExtractOfOverflowIntrinsic(ExtractValueInst &EV);
   Instruction *foldIntrinsicWithOverflowCommon(IntrinsicInst *II);
   Instruction *foldFPSignBitOps(BinaryOperator &I);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -839,6 +839,77 @@
   return nullptr;
 }

+// Tries to perform
+//   (lshr (add X, Y), K)
+//     -> (llvm.uadd.with.overflow (trunc X), (trunc Y)).overflow
+// where
+//   - Only the low K bits of X and Y can be non-zero.
+//   - The add is only used by the shr, or by iK (or narrower) truncates.
+//   - The lshr type has more than 2 bits.
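+//
+// For example (an illustrative sketch with made-up value names, mirroring the
+// lshr_16_add_known_16_leading_zeroes test updated below):
+//   %x   = and i32 %a, 65535
+//   %y   = and i32 %b, 65535
+//   %add = add i32 %x, %y
+//   %res = lshr i32 %add, 16
+// becomes
+//   %tx    = trunc i32 %a to i16
+//   %ty    = trunc i32 %b to i16
+//   %uaddo = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 %tx, i16 %ty)
+//   %ov    = extractvalue { i16, i1 } %uaddo, 1
+//   %res   = zext i1 %ov to i32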
+Instruction *InstCombinerImpl::foldLShrToOverflow(BinaryOperator &I) {
+  assert(I.getOpcode() == Instruction::LShr);
+
+  Value *Add = I.getOperand(0);
+  Value *ShiftAmt = I.getOperand(1);
+  Type *Ty = I.getType();
+
+  if (Ty->getScalarSizeInBits() < 3)
+    return nullptr;
+
+  const APInt *ShAmtAPInt = nullptr;
+  Value *X = nullptr, *Y = nullptr;
+  if (!match(ShiftAmt, m_APInt(ShAmtAPInt)) ||
+      !match(Add, m_Add(m_Value(X), m_Value(Y))))
+    return nullptr;
+
+  const unsigned ShAmt = ShAmtAPInt->getZExtValue();
+  const unsigned BitWidth = Ty->getScalarSizeInBits();
+  const unsigned MinLeadingZeroes = BitWidth - ShAmt;
+
+  if (computeKnownBits(X, 0, &I).countMinLeadingZeros() < MinLeadingZeroes ||
+      computeKnownBits(Y, 0, &I).countMinLeadingZeros() < MinLeadingZeroes)
+    return nullptr;
+
+  // Make sure that `Add` is only used by `I` and by truncates to `ShAmt` bits
+  // or narrower.
+  if (!Add->hasOneUse()) {
+    for (User *U : Add->users()) {
+      if (U == &I)
+        continue;
+
+      TruncInst *Trunc = dyn_cast<TruncInst>(U);
+      if (!Trunc || Trunc->getType()->getScalarSizeInBits() > ShAmt)
+        return nullptr;
+    }
+  }
+
+  // Insert at Add so that the newly created `UAdd` will dominate its users
+  // (i.e. `Add`'s users).
+  Instruction *AddInst = cast<Instruction>(Add);
+  BasicBlock::iterator RestoreInsPt = Builder.GetInsertPoint();
+  Builder.SetInsertPoint(AddInst);
+
+  Type *OpTy = Builder.getIntNTy(ShAmt);
+  X = Builder.CreateTrunc(X, OpTy);
+  Y = Builder.CreateTrunc(Y, OpTy);
+
+  Value *UAddOverflow = Builder.CreateBinaryIntrinsic(
+      Intrinsic::uadd_with_overflow, X, Y, /* FMFSource */ nullptr, "uaddo");
+  Value *UAdd = Builder.CreateExtractValue(UAddOverflow, 0,
+                                           UAddOverflow->getName() + ".add");
+  Value *Overflow = Builder.CreateExtractValue(
+      UAddOverflow, 1, UAddOverflow->getName() + ".overflow");
+
+  // Replace the uses of the original add with a zext of the uaddo's result.
+  if (!Add->hasOneUse())
+    replaceInstUsesWith(*AddInst, Builder.CreateZExt(UAdd, Ty));
+
+  Builder.SetInsertPoint(&*RestoreInsPt);
+
+  // `I` itself is replaced with a zext of the overflow bit.
+  return new ZExtInst(Overflow, Ty);
+}
+
 Instruction *InstCombinerImpl::visitShl(BinaryOperator &I) {
   const SimplifyQuery Q = SQ.getWithInstruction(&I);

@@ -1305,6 +1376,9 @@
     return BinaryOperator::CreateAnd(Mask, X);
   }

+  if (Instruction *Overflow = foldLShrToOverflow(I))
+    return Overflow;
+
   return nullptr;
 }

diff --git a/llvm/test/Transforms/InstCombine/pr34349.ll b/llvm/test/Transforms/InstCombine/pr34349.ll
--- a/llvm/test/Transforms/InstCombine/pr34349.ll
+++ b/llvm/test/Transforms/InstCombine/pr34349.ll
@@ -10,8 +10,11 @@
 ; CHECK-NEXT:    [[V6:%.*]] = trunc i16 [[V5]] to i8
 ; CHECK-NEXT:    [[V7:%.*]] = sub i8 [[P]], [[V6]]
 ; CHECK-NEXT:    [[V8:%.*]] = lshr i8 [[V7]], 1
-; CHECK-NEXT:    [[V13:%.*]] = add nuw i8 [[V8]], [[V6]]
-; CHECK-NEXT:    [[V14:%.*]] = lshr i8 [[V13]], 7
+; CHECK-NEXT:    [[TMP0:%.*]] = trunc i8 [[V8]] to i7
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i16 [[V5]] to i7
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i7, i1 } @llvm.uadd.with.overflow.i7(i7 [[TMP0]], i7 [[TMP1]])
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i7, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[V14:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i8
 ; CHECK-NEXT:    ret i8 [[V14]]
 ;
 entry:
diff --git a/llvm/test/Transforms/InstCombine/shift-add.ll b/llvm/test/Transforms/InstCombine/shift-add.ll
--- a/llvm/test/Transforms/InstCombine/shift-add.ll
+++ b/llvm/test/Transforms/InstCombine/shift-add.ll
@@ -464,10 +464,9 @@

 define i32 @lshr_16_add_zext_basic(i16 %a, i16 %b) {
 ; CHECK-LABEL: @lshr_16_add_zext_basic(
-; CHECK-NEXT:    [[ZEXT_A:%.*]] = zext i16 [[A:%.*]] to i32
-; CHECK-NEXT:    [[ZEXT_B:%.*]] = zext i16 [[B:%.*]] to i32
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[ZEXT_A]], [[ZEXT_B]]
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[ADD]], 16
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 [[A:%.*]], i16 [[B:%.*]])
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i32
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %zext.a = zext i16 %a to i32
@@ -481,10 +480,11 @@

 define i32 @lshr_16_add_known_16_leading_zeroes(i32 %a, i32 %b) {
 ; CHECK-LABEL: @lshr_16_add_known_16_leading_zeroes(
-; CHECK-NEXT:    [[A16:%.*]] = and i32 [[A:%.*]], 65535
-; CHECK-NEXT:    [[B16:%.*]] = and i32 [[B:%.*]], 65535
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[A16]], [[B16]]
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[ADD]], 16
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[A:%.*]] to i16
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i32 [[B:%.*]] to i16
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 [[TMP1]], i16 [[TMP2]])
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i32
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %a16 = and i32 %a, 65535 ; 0x65535
@@ -513,10 +513,9 @@

 define i64 @lshr_32_add_zext_basic(i32 %a, i32 %b) {
 ; CHECK-LABEL: @lshr_32_add_zext_basic(
-; CHECK-NEXT:    [[ZEXT_A:%.*]] = zext i32 [[A:%.*]] to i64
-; CHECK-NEXT:    [[ZEXT_B:%.*]] = zext i32 [[B:%.*]] to i64
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i64 [[ZEXT_A]], [[ZEXT_B]]
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i64 [[ADD]], 32
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i64
 ; CHECK-NEXT:    ret i64 [[LSHR]]
 ;
   %zext.a = zext i32 %a to i64
@@ -528,10 +527,9 @@

 define i64 @lshr_16_to_64_add_zext_basic(i16 %a, i16 %b) {
 ; CHECK-LABEL: @lshr_16_to_64_add_zext_basic(
-; CHECK-NEXT:    [[ZEXT_A:%.*]] = zext i16 [[A:%.*]] to i64
-; CHECK-NEXT:    [[ZEXT_B:%.*]] = zext i16 [[B:%.*]] to i64
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i64 [[ZEXT_A]], [[ZEXT_B]]
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i64 [[ADD]], 16
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 [[A:%.*]], i16 [[B:%.*]])
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i64
 ; CHECK-NEXT:    ret i64 [[LSHR]]
 ;
   %zext.a = zext i16 %a to i64
@@ -545,10 +543,11 @@

 define i64 @lshr_32_add_known_32_leading_zeroes(i64 %a, i64 %b) {
 ; CHECK-LABEL: @lshr_32_add_known_32_leading_zeroes(
-; CHECK-NEXT:    [[A32:%.*]] = and i64 [[A:%.*]], 4294967295
-; CHECK-NEXT:    [[B32:%.*]] = and i64 [[B:%.*]], 4294967295
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i64 [[A32]], [[B32]]
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i64 [[ADD]], 32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[B:%.*]] to i32
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[TMP1]], i32 [[TMP2]])
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i64
 ; CHECK-NEXT:    ret i64 [[LSHR]]
 ;
   %a32 = and i64 %a, 4294967295 ; 0xFFFFFFFF
@@ -578,10 +577,9 @@

 define i32 @ashr_16_add_zext_basic(i16 %a, i16 %b) {
 ; CHECK-LABEL: @ashr_16_add_zext_basic(
-; CHECK-NEXT:    [[ZEXT_A:%.*]] = zext i16 [[A:%.*]] to i32
-; CHECK-NEXT:    [[ZEXT_B:%.*]] = zext i16 [[B:%.*]] to i32
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[ZEXT_A]], [[ZEXT_B]]
-; CHECK-NEXT:    [[LSHR:%.*]] = lshr i32 [[ADD]], 16
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 [[A:%.*]], i16 [[B:%.*]])
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[LSHR:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i32
 ; CHECK-NEXT:    ret i32 [[LSHR]]
 ;
   %zext.a = zext i16 %a to i32
@@ -593,10 +591,9 @@

 define i64 @ashr_32_add_zext_basic(i32 %a, i32 %b) {
 ; CHECK-LABEL: @ashr_32_add_zext_basic(
-; CHECK-NEXT:    [[ZEXT_A:%.*]] = zext i32 [[A:%.*]] to i64
-; CHECK-NEXT:    [[ZEXT_B:%.*]] = zext i32 [[B:%.*]] to i64
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i64 [[ZEXT_A]], [[ZEXT_B]]
-; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[ADD]], 32
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i64
 ; CHECK-NEXT:    ret i64 [[TMP1]]
 ;
   %zext.a = zext i32 %a to i64
@@ -608,10 +605,9 @@

 define i64 @ashr_16_to_64_add_zext_basic(i16 %a, i16 %b) {
 ; CHECK-LABEL: @ashr_16_to_64_add_zext_basic(
-; CHECK-NEXT:    [[ZEXT_A:%.*]] = zext i16 [[A:%.*]] to i64
-; CHECK-NEXT:    [[ZEXT_B:%.*]] = zext i16 [[B:%.*]] to i64
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i64 [[ZEXT_A]], [[ZEXT_B]]
-; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[ADD]], 16
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 [[A:%.*]], i16 [[B:%.*]])
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i64
 ; CHECK-NEXT:    ret i64 [[TMP1]]
 ;
   %zext.a = zext i16 %a to i64
@@ -623,13 +619,11 @@

 define i32 @lshr_32_add_zext_trunc(i32 %a, i32 %b) {
 ; CHECK-LABEL: @lshr_32_add_zext_trunc(
-; CHECK-NEXT:    [[ZEXT_A:%.*]] = zext i32 [[A:%.*]] to i64
-; CHECK-NEXT:    [[ZEXT_B:%.*]] = zext i32 [[B:%.*]] to i64
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i64 [[ZEXT_A]], [[ZEXT_B]]
-; CHECK-NEXT:    [[TRUNC_ADD:%.*]] = trunc i64 [[ADD]] to i32
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i64 [[ADD]], 32
-; CHECK-NEXT:    [[TRUNC_SHR:%.*]] = trunc i64 [[SHR]] to i32
-; CHECK-NEXT:    [[RET:%.*]] = add i32 [[TRUNC_ADD]], [[TRUNC_SHR]]
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+; CHECK-NEXT:    [[UADDO_ADD:%.*]] = extractvalue { i32, i1 } [[UADDO]], 0
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[TRUNC_SHR:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i32
+; CHECK-NEXT:    [[RET:%.*]] = add i32 [[UADDO_ADD]], [[TRUNC_SHR]]
 ; CHECK-NEXT:    ret i32 [[RET]]
 ;
   %zext.a = zext i32 %a to i64
@@ -645,29 +639,28 @@
 define <3 x i32> @add3_i96(<3 x i32> %0, <3 x i32> %1) {
 ; CHECK-LABEL: @add3_i96(
 ; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <3 x i32> [[TMP0:%.*]], i64 0
-; CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <3 x i32> [[TMP1:%.*]], i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <3 x i32> [[TMP1:%.*]], i64 0
+; CHECK-NEXT:    [[UADDO:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[TMP4]], i32 [[TMP3]])
+; CHECK-NEXT:    [[UADDO_ADD:%.*]] = extractvalue { i32, i1 } [[UADDO]], 0
+; CHECK-NEXT:    [[UADDO_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[UADDO]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <3 x i32> [[TMP0]], i64 1
 ; CHECK-NEXT:    [[TMP6:%.*]] = zext i32 [[TMP5]] to i64
-; CHECK-NEXT:    [[TMP7:%.*]] = add nuw nsw i64 [[TMP6]], [[TMP4]]
-; CHECK-NEXT:    [[TMP8:%.*]] = extractelement <3 x i32> [[TMP0]], i64 1
-; CHECK-NEXT:    [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
-; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <3 x i32> [[TMP1]], i64 1
-; CHECK-NEXT:    [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
-; CHECK-NEXT:    [[TMP12:%.*]] = add nuw nsw i64 [[TMP11]], [[TMP9]]
-; CHECK-NEXT:    [[TMP13:%.*]] = lshr i64 [[TMP7]], 32
-; CHECK-NEXT:    [[TMP14:%.*]] = add nuw nsw i64 [[TMP12]], [[TMP13]]
-; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <3 x i32> [[TMP0]], i64 2
-; CHECK-NEXT:    [[TMP16:%.*]] = extractelement <3 x i32> [[TMP1]], i64 2
-; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP16]], [[TMP15]]
-; CHECK-NEXT:    [[TMP18:%.*]] = lshr i64 [[TMP14]], 32
-; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP18]] to i32
-; CHECK-NEXT:    [[TMP20:%.*]] = add i32 [[TMP17]], [[TMP19]]
-; CHECK-NEXT:    [[TMP21:%.*]] = trunc i64 [[TMP7]] to i32
-; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <3 x i32> undef, i32 [[TMP21]], i64 0
-; CHECK-NEXT:    [[TMP23:%.*]] = trunc i64 [[TMP14]] to i32
-; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <3 x i32> [[TMP22]], i32 [[TMP23]], i64 1
-; CHECK-NEXT:    [[TMP25:%.*]] = insertelement <3 x i32> [[TMP24]], i32 [[TMP20]], i64 2
-; CHECK-NEXT:    ret <3 x i32> [[TMP25]]
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <3 x i32> [[TMP1]], i64 1
+; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT:    [[TMP9:%.*]] = add nuw nsw i64 [[TMP8]], [[TMP6]]
+; CHECK-NEXT:    [[TMP10:%.*]] = zext i1 [[UADDO_OVERFLOW]] to i64
+; CHECK-NEXT:    [[TMP11:%.*]] = add nuw nsw i64 [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <3 x i32> [[TMP0]], i64 2
+; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <3 x i32> [[TMP1]], i64 2
+; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP13]], [[TMP12]]
+; CHECK-NEXT:    [[TMP15:%.*]] = lshr i64 [[TMP11]], 32
+; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP14]], [[TMP16]]
+; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <3 x i32> undef, i32 [[UADDO_ADD]], i64 0
+; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP11]] to i32
+; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <3 x i32> [[TMP18]], i32 [[TMP19]], i64 1
+; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <3 x i32> [[TMP20]], i32 [[TMP17]], i64 2
+; CHECK-NEXT:    ret <3 x i32> [[TMP21]]
 ;
   %3 = extractelement <3 x i32> %0, i64 0
   %4 = zext i32 %3 to i64