diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -2145,7 +2145,9 @@ m_Select(m_Value(), m_Specific(Op1), m_Specific(&I))) || match(UI, m_Select(m_Value(), m_Specific(&I), m_Specific(Op1))); })) { - if (Value *NegOp1 = Negator::Negate(IsNegation, Op1, *this)) + if (Value *NegOp1 = Negator::Negate(IsNegation, /* IsNSW */ IsNegation && + I.hasNoSignedWrap(), + Op1, *this)) return BinaryOperator::CreateAdd(NegOp1, Op0); } if (IsNegation) diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h --- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h +++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h @@ -738,13 +738,13 @@ std::array<Value *, 2> getSortedOperandsOfBinOp(Instruction *I); - [[nodiscard]] Value *visitImpl(Value *V, unsigned Depth); + [[nodiscard]] Value *visitImpl(Value *V, bool IsNSW, unsigned Depth); - [[nodiscard]] Value *negate(Value *V, unsigned Depth); + [[nodiscard]] Value *negate(Value *V, bool IsNSW, unsigned Depth); /// Recurse depth-first and attempt to sink the negation. /// FIXME: use worklist? - [[nodiscard]] std::optional<Result> run(Value *Root); + [[nodiscard]] std::optional<Result> run(Value *Root, bool IsNSW); Negator(const Negator &) = delete; Negator(Negator &&) = delete; @@ -754,7 +754,7 @@ public: /// Attempt to negate \p Root. Retuns nullptr if negation can't be performed, /// otherwise returns negated value. 
- [[nodiscard]] static Value *Negate(bool LHSIsZero, Value *Root, + [[nodiscard]] static Value *Negate(bool LHSIsZero, bool IsNSW, Value *Root, InstCombinerImpl &IC); }; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -258,9 +258,14 @@ if (Op0->hasOneUse() && match(Op1, m_NegatedPower2())) { // Interpret X * (-1<<C) as (-X) * (1<<C) and try to sink the negation. // The "* (1<<C)" thus becomes a potential shifting opportunity. - if (Value *NegOp0 = Negator::Negate(/*IsNegation*/ true, Op0, *this)) - return BinaryOperator::CreateMul( - NegOp0, ConstantExpr::getNeg(cast<Constant>(Op1)), I.getName()); + if (Value *NegOp0 = + Negator::Negate(/*IsNegation*/ true, HasNSW, Op0, *this)) { + auto *Op1C = cast<Constant>(Op1); + return replaceInstUsesWith( + I, Builder.CreateMul(NegOp0, ConstantExpr::getNeg(Op1C), "", + /* HasNUW */ false, + HasNSW && Op1C->isNotMinSignedValue())); + } // Try to convert multiply of extended operand to narrow negate and shift // for better analysis. diff --git a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp @@ -128,7 +128,7 @@ // FIXME: can this be reworked into a worklist-based algorithm while preserving // the depth-first, early bailout traversal? -[[nodiscard]] Value *Negator::visitImpl(Value *V, unsigned Depth) { +[[nodiscard]] Value *Negator::visitImpl(Value *V, bool IsNSW, unsigned Depth) { // -(undef) -> undef. if (match(V, m_Undef())) return V; @@ -237,7 +237,8 @@ // However, only do this either if the old `sub` doesn't stick around, or // it was subtracting from a constant. Otherwise, this isn't profitable. 
return Builder.CreateSub(I->getOperand(1), I->getOperand(0), - I->getName() + ".neg"); + I->getName() + ".neg", /* HasNUW */ false, + IsNSW && I->hasNoSignedWrap()); } // Some other cases, while still don't require recursion, @@ -302,7 +303,7 @@ switch (I->getOpcode()) { case Instruction::Freeze: { // `freeze` is negatible if its operand is negatible. - Value *NegOp = negate(I->getOperand(0), Depth + 1); + Value *NegOp = negate(I->getOperand(0), IsNSW, Depth + 1); if (!NegOp) // Early return. return nullptr; return Builder.CreateFreeze(NegOp, I->getName() + ".neg"); @@ -313,7 +314,7 @@ SmallVector<Value *, 4> NegatedIncomingValues(PHI->getNumOperands()); for (auto I : zip(PHI->incoming_values(), NegatedIncomingValues)) { if (!(std::get<1>(I) = - negate(std::get<0>(I), Depth + 1))) // Early return. + negate(std::get<0>(I), IsNSW, Depth + 1))) // Early return. return nullptr; } // All incoming values are indeed negatible. Create negated PHI node. @@ -336,10 +337,10 @@ return NewSelect; } // `select` is negatible if both hands of `select` are negatible. - Value *NegOp1 = negate(I->getOperand(1), Depth + 1); + Value *NegOp1 = negate(I->getOperand(1), IsNSW, Depth + 1); if (!NegOp1) // Early return. return nullptr; - Value *NegOp2 = negate(I->getOperand(2), Depth + 1); + Value *NegOp2 = negate(I->getOperand(2), IsNSW, Depth + 1); if (!NegOp2) return nullptr; // Do preserve the metadata! @@ -349,10 +350,10 @@ case Instruction::ShuffleVector: { // `shufflevector` is negatible if both operands are negatible. auto *Shuf = cast<ShuffleVectorInst>(I); - Value *NegOp0 = negate(I->getOperand(0), Depth + 1); + Value *NegOp0 = negate(I->getOperand(0), IsNSW, Depth + 1); if (!NegOp0) // Early return. 
return nullptr; - Value *NegOp1 = negate(I->getOperand(1), Depth + 1); + Value *NegOp1 = negate(I->getOperand(1), IsNSW, Depth + 1); if (!NegOp1) return nullptr; return Builder.CreateShuffleVector(NegOp0, NegOp1, Shuf->getShuffleMask(), @@ -361,7 +362,7 @@ case Instruction::ExtractElement: { // `extractelement` is negatible if source operand is negatible. auto *EEI = cast<ExtractElementInst>(I); - Value *NegVector = negate(EEI->getVectorOperand(), Depth + 1); + Value *NegVector = negate(EEI->getVectorOperand(), IsNSW, Depth + 1); if (!NegVector) // Early return. return nullptr; return Builder.CreateExtractElement(NegVector, EEI->getIndexOperand(), @@ -371,10 +372,10 @@ // `insertelement` is negatible if both the source vector and // element-to-be-inserted are negatible. auto *IEI = cast<InsertElementInst>(I); - Value *NegVector = negate(IEI->getOperand(0), Depth + 1); + Value *NegVector = negate(IEI->getOperand(0), IsNSW, Depth + 1); if (!NegVector) // Early return. return nullptr; - Value *NegNewElt = negate(IEI->getOperand(1), Depth + 1); + Value *NegNewElt = negate(IEI->getOperand(1), IsNSW, Depth + 1); if (!NegNewElt) // Early return. return nullptr; return Builder.CreateInsertElement(NegVector, NegNewElt, IEI->getOperand(2), @@ -382,15 +383,17 @@ } case Instruction::Trunc: { // `trunc` is negatible if its operand is negatible. - Value *NegOp = negate(I->getOperand(0), Depth + 1); + Value *NegOp = negate(I->getOperand(0), /* IsNSW */ false, Depth + 1); if (!NegOp) // Early return. return nullptr; return Builder.CreateTrunc(NegOp, I->getType(), I->getName() + ".neg"); } case Instruction::Shl: { // `shl` is negatible if the first operand is negatible. 
- if (Value *NegOp0 = negate(I->getOperand(0), Depth + 1)) - return Builder.CreateShl(NegOp0, I->getOperand(1), I->getName() + ".neg"); + IsNSW &= I->hasNoSignedWrap(); + if (Value *NegOp0 = negate(I->getOperand(0), IsNSW, Depth + 1)) + return Builder.CreateShl(NegOp0, I->getOperand(1), I->getName() + ".neg", + /* HasNUW */ false, IsNSW); // Otherwise, `shl %x, C` can be interpreted as `mul %x, 1<<C`. auto *Op1C = dyn_cast<Constant>(I->getOperand(1)); if (!Op1C || !IsTrulyNegation) @@ -398,7 +401,7 @@ return Builder.CreateMul( I->getOperand(0), ConstantExpr::getShl(Constant::getAllOnesValue(Op1C->getType()), Op1C), - I->getName() + ".neg"); + I->getName() + ".neg", /* HasNUW */ false, IsNSW); } case Instruction::Or: { if (!haveNoCommonBitsSet(I->getOperand(0), I->getOperand(1), DL, &AC, I, @@ -417,7 +420,7 @@ SmallVector<Value *, 2> NegatedOps, NonNegatedOps; for (Value *Op : I->operands()) { // Can we sink the negation into this operand? - if (Value *NegOp = negate(Op, Depth + 1)) { + if (Value *NegOp = negate(Op, /* IsNSW */ false, Depth + 1)) { NegatedOps.emplace_back(NegOp); // Successfully negated operand! continue; } @@ -458,16 +461,17 @@ Value *NegatedOp, *OtherOp; // First try the second operand, in case it's a constant it will be best to // just invert it instead of sinking the `neg` deeper. - if (Value *NegOp1 = negate(Ops[1], Depth + 1)) { + if (Value *NegOp1 = negate(Ops[1], /* IsNSW */ false, Depth + 1)) { NegatedOp = NegOp1; OtherOp = Ops[0]; - } else if (Value *NegOp0 = negate(Ops[0], Depth + 1)) { + } else if (Value *NegOp0 = negate(Ops[0], /* IsNSW */ false, Depth + 1)) { NegatedOp = NegOp0; OtherOp = Ops[1]; } else // Can't negate either of them. return nullptr; - return Builder.CreateMul(NegatedOp, OtherOp, I->getName() + ".neg"); + return Builder.CreateMul(NegatedOp, OtherOp, I->getName() + ".neg", + /* HasNUW */ false, IsNSW && I->hasNoSignedWrap()); } default: return nullptr; // Don't know, likely not negatible for free. @@ -476,7 +480,7 @@ llvm_unreachable("Can't get here. 
We always return from switch."); } -[[nodiscard]] Value *Negator::negate(Value *V, unsigned Depth) { +[[nodiscard]] Value *Negator::negate(Value *V, bool IsNSW, unsigned Depth) { NegatorMaxDepthVisited.updateMax(Depth); ++NegatorNumValuesVisited; @@ -506,15 +510,16 @@ #endif // No luck. Try negating it for real. - Value *NegatedV = visitImpl(V, Depth); + Value *NegatedV = visitImpl(V, IsNSW, Depth); // And cache the (real) result for the future. NegationsCache[V] = NegatedV; return NegatedV; } -[[nodiscard]] std::optional<Negator::Result> Negator::run(Value *Root) { - Value *Negated = negate(Root, /*Depth=*/0); +[[nodiscard]] std::optional<Negator::Result> Negator::run(Value *Root, + bool IsNSW) { + Value *Negated = negate(Root, IsNSW, /*Depth=*/0); if (!Negated) { // We must cleanup newly-inserted instructions, to avoid any potential // endless combine looping. @@ -525,7 +530,7 @@ return std::make_pair(ArrayRef(NewInstructions), Negated); } -[[nodiscard]] Value *Negator::Negate(bool LHSIsZero, Value *Root, +[[nodiscard]] Value *Negator::Negate(bool LHSIsZero, bool IsNSW, Value *Root, InstCombinerImpl &IC) { ++NegatorTotalNegationsAttempted; LLVM_DEBUG(dbgs() << "Negator: attempting to sink negation into " << *Root @@ -536,7 +541,7 @@ Negator N(Root->getContext(), IC.getDataLayout(), IC.getAssumptionCache(), IC.getDominatorTree(), LHSIsZero); - std::optional<Result> Res = N.run(Root); + std::optional<Result> Res = N.run(Root, IsNSW); if (!Res) { // Negation failed. 
LLVM_DEBUG(dbgs() << "Negator: failed to sink negation into " << *Root << "\n"); diff --git a/llvm/test/Transforms/InstCombine/abs-intrinsic.ll b/llvm/test/Transforms/InstCombine/abs-intrinsic.ll --- a/llvm/test/Transforms/InstCombine/abs-intrinsic.ll +++ b/llvm/test/Transforms/InstCombine/abs-intrinsic.ll @@ -486,7 +486,7 @@ ; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: br i1 [[CMP_NOT]], label [[COND_END:%.*]], label [[COND_TRUE:%.*]] ; CHECK: cond.true: -; CHECK-NEXT: [[SUB_NEG:%.*]] = sub i32 [[Y]], [[X]] +; CHECK-NEXT: [[SUB_NEG:%.*]] = sub nsw i32 [[Y]], [[X]] ; CHECK-NEXT: br label [[COND_END]] ; CHECK: cond.end: ; CHECK-NEXT: [[R:%.*]] = phi i32 [ [[SUB_NEG]], [[COND_TRUE]] ], [ 0, [[ENTRY:%.*]] ] @@ -513,7 +513,7 @@ ; CHECK-NEXT: [[CMP_NOT_NOT:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: br i1 [[CMP_NOT_NOT]], label [[COND_FALSE:%.*]], label [[COND_END:%.*]] ; CHECK: cond.false: -; CHECK-NEXT: [[SUB_NEG:%.*]] = sub i32 [[Y]], [[X]] +; CHECK-NEXT: [[SUB_NEG:%.*]] = sub nsw i32 [[Y]], [[X]] ; CHECK-NEXT: br label [[COND_END]] ; CHECK: cond.end: ; CHECK-NEXT: [[R:%.*]] = phi i32 [ [[SUB_NEG]], [[COND_FALSE]] ], [ 0, [[ENTRY:%.*]] ] @@ -539,7 +539,7 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]] ; CHECK: cond.true: -; CHECK-NEXT: [[SUB_NEG:%.*]] = sub i32 [[Y]], [[X]] +; CHECK-NEXT: [[SUB_NEG:%.*]] = sub nsw i32 [[Y]], [[X]] ; CHECK-NEXT: br label [[COND_END]] ; CHECK: cond.end: ; CHECK-NEXT: [[R:%.*]] = phi i32 [ [[SUB_NEG]], [[COND_TRUE]] ], [ 0, [[ENTRY:%.*]] ] @@ -566,7 +566,7 @@ ; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: br i1 [[CMP_NOT]], label [[COND_END:%.*]], label [[COND_TRUE:%.*]] ; CHECK: cond.true: -; CHECK-NEXT: [[SUB_NEG:%.*]] = sub i32 [[Y]], [[X]] +; CHECK-NEXT: [[SUB_NEG:%.*]] = sub nsw i32 [[Y]], [[X]] ; CHECK-NEXT: br label [[COND_END]] ; CHECK: cond.end: ; 
CHECK-NEXT: [[R:%.*]] = phi i32 [ [[SUB_NEG]], [[COND_TRUE]] ], [ 0, [[ENTRY:%.*]] ] @@ -619,7 +619,7 @@ ; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i8 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: br i1 [[CMP_NOT]], label [[COND_END:%.*]], label [[COND_TRUE:%.*]] ; CHECK: cond.true: -; CHECK-NEXT: [[SUB_NEG:%.*]] = sub i8 [[Y]], [[X]] +; CHECK-NEXT: [[SUB_NEG:%.*]] = sub nsw i8 [[Y]], [[X]] ; CHECK-NEXT: br label [[COND_END]] ; CHECK: cond.end: ; CHECK-NEXT: [[R:%.*]] = phi i8 [ [[SUB_NEG]], [[COND_TRUE]] ], [ 0, [[ENTRY:%.*]] ] diff --git a/llvm/test/Transforms/InstCombine/mul.ll b/llvm/test/Transforms/InstCombine/mul.ll --- a/llvm/test/Transforms/InstCombine/mul.ll +++ b/llvm/test/Transforms/InstCombine/mul.ll @@ -1545,8 +1545,8 @@ define i8 @mulsub_nsw(i8 %a1, i8 %a2) { ; CHECK-LABEL: @mulsub_nsw( -; CHECK-NEXT: [[A_NEG:%.*]] = sub i8 [[A2:%.*]], [[A1:%.*]] -; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[A_NEG]], 1 +; CHECK-NEXT: [[A_NEG:%.*]] = sub nsw i8 [[A2:%.*]], [[A1:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = shl nsw i8 [[A_NEG]], 1 ; CHECK-NEXT: ret i8 [[MUL]] ; %a = sub nsw i8 %a1, %a2 @@ -1554,9 +1554,11 @@ ret i8 %mul } +; It would be safe to keep the nsw on the shl here, but only because the mul +; to shl transform happens to replace undef with 0. 
define <2 x i8> @mulsub_nsw_undef(<2 x i8> %a1, <2 x i8> %a2) { ; CHECK-LABEL: @mulsub_nsw_undef( -; CHECK-NEXT: [[A_NEG:%.*]] = sub <2 x i8> [[A2:%.*]], [[A1:%.*]] +; CHECK-NEXT: [[A_NEG:%.*]] = sub nsw <2 x i8> [[A2:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[MUL:%.*]] = shl <2 x i8> [[A_NEG]], <i8 1, i8 undef> ; CHECK-NEXT: ret <2 x i8> [[MUL]] ; diff --git a/llvm/test/Transforms/InstCombine/nsw.ll b/llvm/test/Transforms/InstCombine/nsw.ll --- a/llvm/test/Transforms/InstCombine/nsw.ll +++ b/llvm/test/Transforms/InstCombine/nsw.ll @@ -145,7 +145,7 @@ define i32 @neg_sub0_sub_nsw_nsw(i32 %a, i32 %b) { ; CHECK-LABEL: @neg_sub0_sub_nsw_nsw( -; CHECK-NEXT: [[C_NEG:%.*]] = sub i32 [[B:%.*]], [[A:%.*]] +; CHECK-NEXT: [[C_NEG:%.*]] = sub nsw i32 [[B:%.*]], [[A:%.*]] ; CHECK-NEXT: ret i32 [[C_NEG]] ; %c = sub nsw i32 %a, %b @@ -181,7 +181,7 @@ define i32 @neg_mul_sub_nsw_nsw(i32 %a, i32 %b) { ; CHECK-LABEL: @neg_mul_sub_nsw_nsw( -; CHECK-NEXT: [[C_NEG:%.*]] = sub i32 [[B:%.*]], [[A:%.*]] +; CHECK-NEXT: [[C_NEG:%.*]] = sub nsw i32 [[B:%.*]], [[A:%.*]] ; CHECK-NEXT: ret i32 [[C_NEG]] ; %c = sub nsw i32 %a, %b @@ -255,7 +255,7 @@ define i8 @neg_nsw_freeze(i8 %a1, i8 %a2) { ; CHECK-LABEL: @neg_nsw_freeze( -; CHECK-NEXT: [[A_NEG:%.*]] = sub i8 [[A2:%.*]], [[A1:%.*]] +; CHECK-NEXT: [[A_NEG:%.*]] = sub nsw i8 [[A2:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[FR_NEG:%.*]] = freeze i8 [[A_NEG]] ; CHECK-NEXT: ret i8 [[FR_NEG]] ; @@ -269,10 +269,10 @@ ; CHECK-LABEL: @neg_nsw_phi( ; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[ELSE:%.*]] ; CHECK: if: -; CHECK-NEXT: [[A_NEG:%.*]] = sub i8 [[A2:%.*]], [[A1:%.*]] +; CHECK-NEXT: [[A_NEG:%.*]] = sub nsw i8 [[A2:%.*]], [[A1:%.*]] ; CHECK-NEXT: br label [[JOIN:%.*]] ; CHECK: else: -; CHECK-NEXT: [[B_NEG:%.*]] = sub i8 [[B2:%.*]], [[B1:%.*]] +; CHECK-NEXT: [[B_NEG:%.*]] = sub nsw i8 [[B2:%.*]], [[B1:%.*]] ; CHECK-NEXT: br label [[JOIN]] ; CHECK: join: ; CHECK-NEXT: [[PHI_NEG:%.*]] = phi i8 [ [[A_NEG]], [[IF]] ], [ [[B_NEG]], [[ELSE]] ] @@ -296,8 +296,8 @@ define i8 
@neg_nsw_select(i1 %c, i8 %a1, i8 %a2, i8 %b1, i8 %b2) { ; CHECK-LABEL: @neg_nsw_select( -; CHECK-NEXT: [[A_NEG:%.*]] = sub i8 [[A2:%.*]], [[A1:%.*]] -; CHECK-NEXT: [[B_NEG:%.*]] = sub i8 [[B2:%.*]], [[B1:%.*]] +; CHECK-NEXT: [[A_NEG:%.*]] = sub nsw i8 [[A2:%.*]], [[A1:%.*]] +; CHECK-NEXT: [[B_NEG:%.*]] = sub nsw i8 [[B2:%.*]], [[B1:%.*]] ; CHECK-NEXT: [[SEL_NEG:%.*]] = select i1 [[C:%.*]], i8 [[A_NEG]], i8 [[B_NEG]] ; CHECK-NEXT: ret i8 [[SEL_NEG]] ; @@ -310,8 +310,8 @@ define <4 x i8> @neg_nsw_shufflevector(<2 x i8> %a1, <2 x i8> %a2, <2 x i8> %b1, <2 x i8> %b2) { ; CHECK-LABEL: @neg_nsw_shufflevector( -; CHECK-NEXT: [[A_NEG:%.*]] = sub <2 x i8> [[A2:%.*]], [[A1:%.*]] -; CHECK-NEXT: [[B_NEG:%.*]] = sub <2 x i8> [[B2:%.*]], [[B1:%.*]] +; CHECK-NEXT: [[A_NEG:%.*]] = sub nsw <2 x i8> [[A2:%.*]], [[A1:%.*]] +; CHECK-NEXT: [[B_NEG:%.*]] = sub nsw <2 x i8> [[B2:%.*]], [[B1:%.*]] ; CHECK-NEXT: [[SHUF_NEG:%.*]] = shufflevector <2 x i8> [[A_NEG]], <2 x i8> [[B_NEG]], <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; CHECK-NEXT: ret <4 x i8> [[SHUF_NEG]] ; @@ -324,7 +324,7 @@ define i8 @neg_nsw_extractelement(<2 x i8> %a1, <2 x i8> %a2) { ; CHECK-LABEL: @neg_nsw_extractelement( -; CHECK-NEXT: [[A_NEG:%.*]] = sub <2 x i8> [[A2:%.*]], [[A1:%.*]] +; CHECK-NEXT: [[A_NEG:%.*]] = sub nsw <2 x i8> [[A2:%.*]], [[A1:%.*]] ; CHECK-NEXT: [[EXTR_NEG:%.*]] = extractelement <2 x i8> [[A_NEG]], i64 0 ; CHECK-NEXT: ret i8 [[EXTR_NEG]] ; @@ -336,8 +336,8 @@ define <2 x i8> @neg_nsw_insertelement(<2 x i8> %a1, <2 x i8> %a2, i8 %b1, i8 %b2) { ; CHECK-LABEL: @neg_nsw_insertelement( -; CHECK-NEXT: [[A_NEG:%.*]] = sub <2 x i8> [[A2:%.*]], [[A1:%.*]] -; CHECK-NEXT: [[B_NEG:%.*]] = sub i8 [[B2:%.*]], [[B1:%.*]] +; CHECK-NEXT: [[A_NEG:%.*]] = sub nsw <2 x i8> [[A2:%.*]], [[A1:%.*]] +; CHECK-NEXT: [[B_NEG:%.*]] = sub nsw i8 [[B2:%.*]], [[B1:%.*]] ; CHECK-NEXT: [[INSERT_NEG:%.*]] = insertelement <2 x i8> [[A_NEG]], i8 [[B_NEG]], i64 0 ; CHECK-NEXT: ret <2 x i8> [[INSERT_NEG]] ; @@ -350,8 +350,8 @@ define i8 @neg_nsw_shl(i8 %a1, i8 %a2, 
i8 %b) { ; CHECK-LABEL: @neg_nsw_shl( -; CHECK-NEXT: [[A_NEG:%.*]] = sub i8 [[A2:%.*]], [[A1:%.*]] -; CHECK-NEXT: [[SHL_NEG:%.*]] = shl i8 [[A_NEG]], [[B:%.*]] +; CHECK-NEXT: [[A_NEG:%.*]] = sub nsw i8 [[A2:%.*]], [[A1:%.*]] +; CHECK-NEXT: [[SHL_NEG:%.*]] = shl nsw i8 [[A_NEG]], [[B:%.*]] ; CHECK-NEXT: ret i8 [[SHL_NEG]] ; %a = sub nsw i8 %a1, %a2 @@ -374,7 +374,7 @@ define i8 @neg_nsw_shl_to_mul(i8 %a, i8 %b) { ; CHECK-LABEL: @neg_nsw_shl_to_mul( -; CHECK-NEXT: [[SHL_NEG:%.*]] = mul i8 [[A:%.*]], -2 +; CHECK-NEXT: [[SHL_NEG:%.*]] = mul nsw i8 [[A:%.*]], -2 ; CHECK-NEXT: ret i8 [[SHL_NEG]] ; %shl = shl nsw i8 %a, 1 @@ -395,7 +395,7 @@ define i8 @neg_nsw_mul(i8 %a1, i8 %a2, i8 %b) { ; CHECK-LABEL: @neg_nsw_mul( ; CHECK-NEXT: [[A_NEG:%.*]] = sub i8 [[A2:%.*]], [[A1:%.*]] -; CHECK-NEXT: [[SHL_NEG:%.*]] = mul i8 [[A_NEG]], [[B:%.*]] +; CHECK-NEXT: [[SHL_NEG:%.*]] = mul nsw i8 [[A_NEG]], [[B:%.*]] ; CHECK-NEXT: ret i8 [[SHL_NEG]] ; %a = sub nsw i8 %a1, %a2