diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1734,22 +1734,18 @@
   }
 
   if (Constant *C = dyn_cast<Constant>(Op0)) {
-    bool IsNegate = match(C, m_ZeroInt());
+    // -f(x) -> f(-x) if possible.
+    if (match(C, m_Zero()))
+      if (Value *Neg = freelyNegateValue(Op1))
+        return replaceInstUsesWith(I, Neg);
+
     Value *X;
-    if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
-      // 0 - (zext bool) --> sext bool
+    if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
       // C - (zext bool) --> bool ? C - 1 : C
-      if (IsNegate)
-        return CastInst::CreateSExtOrBitCast(X, I.getType());
       return SelectInst::Create(X, SubOne(C), C);
-    }
-    if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
-      // 0 - (sext bool) --> zext bool
+    if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
      // C - (sext bool) --> bool ? C + 1 : C
-      if (IsNegate)
-        return CastInst::CreateZExtOrBitCast(X, I.getType());
       return SelectInst::Create(X, AddOne(C), C);
-    }
 
     // C - ~X == X + (1+C)
     if (match(Op1, m_Not(m_Value(X))))
@@ -1778,51 +1774,15 @@
 
   const APInt *Op0C;
   if (match(Op0, m_APInt(Op0C))) {
-
-    if (Op0C->isNullValue()) {
-      Value *Op1Wide;
-      match(Op1, m_TruncOrSelf(m_Value(Op1Wide)));
-      bool HadTrunc = Op1Wide != Op1;
-      bool NoTruncOrTruncIsOneUse = !HadTrunc || Op1->hasOneUse();
-      unsigned BitWidth = Op1Wide->getType()->getScalarSizeInBits();
-
-      Value *X;
-      const APInt *ShAmt;
-      // -(X >>u 31) -> (X >>s 31)
-      if (NoTruncOrTruncIsOneUse &&
-          match(Op1Wide, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
-          *ShAmt == BitWidth - 1) {
-        Value *ShAmtOp = cast<Instruction>(Op1Wide)->getOperand(1);
-        Instruction *NewShift = BinaryOperator::CreateAShr(X, ShAmtOp);
-        NewShift->copyIRFlags(Op1Wide);
-        if (!HadTrunc)
-          return NewShift;
-        Builder.Insert(NewShift);
-        return TruncInst::CreateTruncOrBitCast(NewShift, Op1->getType());
-      }
-      // -(X >>s 31) -> (X >>u 31)
-      if (NoTruncOrTruncIsOneUse &&
-          match(Op1Wide, m_AShr(m_Value(X), m_APInt(ShAmt))) &&
-          *ShAmt == BitWidth - 1) {
-        Value *ShAmtOp = cast<Instruction>(Op1Wide)->getOperand(1);
-        Instruction *NewShift = BinaryOperator::CreateLShr(X, ShAmtOp);
-        NewShift->copyIRFlags(Op1Wide);
-        if (!HadTrunc)
-          return NewShift;
-        Builder.Insert(NewShift);
-        return TruncInst::CreateTruncOrBitCast(NewShift, Op1->getType());
-      }
-
-      if (!HadTrunc && Op1->hasOneUse()) {
-        Value *LHS, *RHS;
-        SelectPatternFlavor SPF = matchSelectPattern(Op1, LHS, RHS).Flavor;
-        if (SPF == SPF_ABS || SPF == SPF_NABS) {
-          // This is a negate of an ABS/NABS pattern. Just swap the operands
-          // of the select.
-          cast<SelectInst>(Op1)->swapValues();
-          // Don't swap prof metadata, we didn't change the branch behavior.
-          return replaceInstUsesWith(I, Op1);
-        }
+    if (Op0C->isNullValue() && Op1->hasOneUse()) {
+      Value *LHS, *RHS;
+      SelectPatternFlavor SPF = matchSelectPattern(Op1, LHS, RHS).Flavor;
+      if (SPF == SPF_ABS || SPF == SPF_NABS) {
+        // This is a negate of an ABS/NABS pattern. Just swap the operands
+        // of the select.
+        cast<SelectInst>(Op1)->swapValues();
+        // Don't swap prof metadata, we didn't change the branch behavior.
+        return replaceInstUsesWith(I, Op1);
       }
     }
 
@@ -1957,7 +1917,7 @@
   }
 
   if (Op1->hasOneUse()) {
-    Value *X = nullptr, *Y = nullptr, *Z = nullptr;
+    Value *Y = nullptr, *Z = nullptr;
     Constant *C = nullptr;
 
     // (X - (Y - Z)) --> (X + (Z - Y)).
@@ -1970,24 +1930,6 @@
       return BinaryOperator::CreateAnd(Op0,
                                        Builder.CreateNot(Y, Y->getName() + ".not"));
 
-    // 0 - (X sdiv C) -> (X sdiv -C) provided the negation doesn't overflow.
-    if (match(Op0, m_Zero())) {
-      Constant *Op11C;
-      if (match(Op1, m_SDiv(m_Value(X), m_Constant(Op11C))) &&
-          !Op11C->containsUndefElement() && Op11C->isNotMinSignedValue() &&
-          Op11C->isNotOneValue()) {
-        Instruction *BO =
-            BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(Op11C));
-        BO->setIsExact(cast<BinaryOperator>(Op1)->isExact());
-        return BO;
-      }
-    }
-
-    // 0 - (X << Y) -> (-X << Y) when X is freely negatable.
-    if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
-      if (Value *XNeg = freelyNegateValue(X))
-        return BinaryOperator::CreateShl(XNeg, Y);
-
     // Subtracting -1/0 is the same as adding 1/0:
     // sub [nsw] Op0, sext(bool Y) -> add [nsw] Op0, zext(bool Y)
     // 'nuw' is dropped in favor of the canonical form.
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -862,15 +862,82 @@
   if (Value *NegV = dyn_castNegVal(V))
     return NegV;
 
-  if (!V->hasOneUse())
+  Instruction *I = dyn_cast<Instruction>(V);
+  if (!I)
     return nullptr;
 
-  Value *A, *B;
+  unsigned BitWidth = I->getType()->getScalarSizeInBits();
+  switch (I->getOpcode()) {
+  // 0-(zext i1 A) => sext i1 A
+  case Instruction::ZExt:
+    if (I->getOperand(0)->getType()->isIntOrIntVectorTy(1))
+      return Builder.CreateSExtOrBitCast(
+          I->getOperand(0), I->getType(), I->getName() + ".neg");
+    return nullptr;
+
+  // 0-(sext i1 A) => zext i1 A
+  case Instruction::SExt:
+    if (I->getOperand(0)->getType()->isIntOrIntVectorTy(1))
+      return Builder.CreateZExtOrBitCast(
+          I->getOperand(0), I->getType(), I->getName() + ".neg");
+    return nullptr;
+
+  // 0-(A lshr (BW-1)) => A ashr (BW-1)
+  case Instruction::LShr:
+    if (match(I->getOperand(1), m_SpecificInt(BitWidth - 1)))
+      return Builder.CreateAShr(
+          I->getOperand(0), I->getOperand(1),
+          I->getName() + ".neg", cast<BinaryOperator>(I)->isExact());
+    return nullptr;
+
+  // 0-(A ashr (BW-1)) => A lshr (BW-1)
+  case Instruction::AShr:
+    if (match(I->getOperand(1), m_SpecificInt(BitWidth - 1)))
+      return Builder.CreateLShr(
+          I->getOperand(0), I->getOperand(1),
+          I->getName() + ".neg", cast<BinaryOperator>(I)->isExact());
+    return nullptr;
+
+  default:
+    break;
+  }
+
+  // TODO: The "sub" pattern below could also be applied without the one-use
+  // restriction. Not allowing it for now in line with existing behavior.
+  if (!I->hasOneUse())
+    return nullptr;
+
+  switch (I->getOpcode()) {
   // 0-(A-B) => B-A
-  if (match(V, m_Sub(m_Value(A), m_Value(B))))
-    return Builder.CreateSub(B, A);
+  case Instruction::Sub:
+    return Builder.CreateSub(
+        I->getOperand(1), I->getOperand(0), I->getName() + ".neg");
+
+  // 0-(A sdiv C) => A sdiv (0-C) provided the negation doesn't overflow.
+  case Instruction::SDiv: {
+    Constant *C = dyn_cast<Constant>(I->getOperand(1));
+    if (C && !C->containsUndefElement() && C->isNotMinSignedValue() &&
+        C->isNotOneValue())
+      return Builder.CreateSDiv(I->getOperand(0), ConstantExpr::getNeg(C),
+          I->getName() + ".neg", cast<BinaryOperator>(I)->isExact());
+    return nullptr;
+  }
 
-  return nullptr;
+  // 0-(A<<B) => (0-A)<<B
+  case Instruction::Shl:
+    if (Value *NegA = freelyNegateValue(I->getOperand(0)))
+      return Builder.CreateShl(NegA, I->getOperand(1), I->getName() + ".neg");
+    return nullptr;
+
+  // 0-(trunc A) => trunc (0-A)
+  case Instruction::Trunc:
+    if (Value *NegA = freelyNegateValue(I->getOperand(0)))
+      return Builder.CreateTrunc(NegA, I->getType(), I->getName() + ".neg");
+    return nullptr;
+
+  default:
+    return nullptr;
+  }
 }
 
 static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
diff --git a/llvm/test/Transforms/InstCombine/sub.ll b/llvm/test/Transforms/InstCombine/sub.ll
--- a/llvm/test/Transforms/InstCombine/sub.ll
+++ b/llvm/test/Transforms/InstCombine/sub.ll
@@ -300,8 +300,8 @@
 
 define i32 @test12(i32 %A) {
 ; CHECK-LABEL: @test12(
-; CHECK-NEXT:    [[C:%.*]] = lshr i32 [[A:%.*]], 31
-; CHECK-NEXT:    ret i32 [[C]]
+; CHECK-NEXT:    [[B_NEG:%.*]] = lshr i32 [[A:%.*]], 31
+; CHECK-NEXT:    ret i32 [[B_NEG]]
 ;
   %B = ashr i32 %A, 31
   %C = sub i32 0, %B
@@ -310,8 +310,8 @@
 
 define i32 @test13(i32 %A) {
 ; CHECK-LABEL: @test13(
-; CHECK-NEXT:    [[C:%.*]] = ashr i32 [[A:%.*]], 31
-; CHECK-NEXT:    ret i32 [[C]]
+; CHECK-NEXT:    [[B_NEG:%.*]] = ashr i32 [[A:%.*]], 31
+; CHECK-NEXT:    ret i32 [[B_NEG]]
 ;
   %B = lshr i32 %A, 31
   %C = sub i32 0, %B
@@ -320,8 +320,8 @@
 
 define <2 x i32> @test12vec(<2 x i32> %A) {
 ; CHECK-LABEL: @test12vec(
-; CHECK-NEXT:    [[C:%.*]] = lshr <2 x i32> [[A:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    ret <2 x i32> [[C]]
+; CHECK-NEXT:    [[B_NEG:%.*]] = lshr <2 x i32> [[A:%.*]], <i32 31, i32 31>
+; CHECK-NEXT:    ret <2 x i32> [[B_NEG]]
 ;
   %B = ashr <2 x i32> %A, <i32 31, i32 31>
   %C = sub <2 x i32> zeroinitializer, %B
@@ -330,8 +330,8 @@
 
 define <2 x i32> @test13vec(<2 x i32> %A) {
 ; CHECK-LABEL: @test13vec(
-; CHECK-NEXT:    [[C:%.*]] = ashr <2 x i32> [[A:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    ret <2 x i32> [[C]]
+; CHECK-NEXT:    [[B_NEG:%.*]] = ashr <2 x i32> [[A:%.*]], <i32 31, i32 31>
+; CHECK-NEXT:    ret <2 x i32> [[B_NEG]]
 ;
   %B = lshr <2 x i32> %A, <i32 31, i32 31>
   %C = sub <2 x i32> zeroinitializer, %B
@@ -351,8 +351,8 @@
 
 define i32 @test16(i32 %A) {
 ; CHECK-LABEL: @test16(
-; CHECK-NEXT:    [[Y:%.*]] = sdiv i32 [[A:%.*]], -1123
-; CHECK-NEXT:    ret i32 [[Y]]
+; CHECK-NEXT:    [[X_NEG:%.*]] = sdiv i32 [[A:%.*]], -1123
+; CHECK-NEXT:    ret i32 [[X_NEG]]
 ;
   %X = sdiv i32 %A, 1123
   %Y = sub i32 0, %X
@@ -507,8 +507,8 @@
 define i64 @test25(i8* %P, i64 %A){
 ; CHECK-LABEL: @test25(
 ; CHECK-NEXT:    [[B_IDX:%.*]] = shl nsw i64 [[A:%.*]], 1
-; CHECK-NEXT:    [[DIFF_NEG:%.*]] = add i64 [[B_IDX]], -84
-; CHECK-NEXT:    ret i64 [[DIFF_NEG]]
+; CHECK-NEXT:    [[DOTNEG:%.*]] = add i64 [[B_IDX]], -84
+; CHECK-NEXT:    ret i64 [[DOTNEG]]
 ;
   %B = getelementptr inbounds [42 x i16], [42 x i16]* @Arr, i64 0, i64 %A
   %C = ptrtoint i16* %B to i64
@@ -522,8 +522,8 @@
 ; CHECK-LABEL: @test25_as1(
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i16
 ; CHECK-NEXT:    [[B_IDX:%.*]] = shl nsw i16 [[TMP1]], 1
-; CHECK-NEXT:    [[DIFF_NEG:%.*]] = add i16 [[B_IDX]], -84
-; CHECK-NEXT:    ret i16 [[DIFF_NEG]]
+; CHECK-NEXT:    [[DOTNEG:%.*]] = add i16 [[B_IDX]], -84
+; CHECK-NEXT:    ret i16 [[DOTNEG]]
 ;
   %B = getelementptr inbounds [42 x i16], [42 x i16] addrspace(1)* @Arr_as1, i64 0, i64 %A
   %C = ptrtoint i16 addrspace(1)* %B to i16
@@ -533,8 +533,8 @@
 
 define i32 @test26(i32 %x) {
 ; CHECK-LABEL: @test26(
-; CHECK-NEXT:    [[NEG:%.*]] = shl i32 -3, [[X:%.*]]
-; CHECK-NEXT:    ret i32 [[NEG]]
+; CHECK-NEXT:    [[SHL_NEG:%.*]] = shl i32 -3, [[X:%.*]]
+; CHECK-NEXT:    ret i32 [[SHL_NEG]]
 ;
   %shl = shl i32 3, %x
   %neg = sub i32 0, %shl
@@ -543,9 +543,9 @@
 
 define i64 @test_neg_shl_sub(i64 %a, i64 %b) {
 ; CHECK-LABEL: @test_neg_shl_sub(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT:    [[NEG:%.*]] = shl i64 [[TMP1]], 2
-; CHECK-NEXT:    ret i64 [[NEG]]
+; CHECK-NEXT:    [[SUB_NEG:%.*]] = sub i64 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT:    [[MUL_NEG:%.*]] = shl i64 [[SUB_NEG]], 2
+; CHECK-NEXT:    ret i64 [[MUL_NEG]]
 ;
   %sub = sub i64 %a, %b
   %mul = shl i64 %sub, 2
@@ -585,10 +585,9 @@
 
 define i64 @test_neg_shl_div(i64 %a) {
 ; CHECK-LABEL: @test_neg_shl_div(
-; CHECK-NEXT:    [[DIV:%.*]] = sdiv i64 [[A:%.*]], 3
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[DIV]], 2
-; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[SHL]]
-; CHECK-NEXT:    ret i64 [[NEG]]
+; CHECK-NEXT:    [[DIV_NEG:%.*]] = sdiv i64 [[A:%.*]], -3
+; CHECK-NEXT:    [[SHL_NEG:%.*]] = shl i64 [[DIV_NEG]], 2
+; CHECK-NEXT:    ret i64 [[SHL_NEG]]
 ;
   %div = sdiv i64 %a, 3
   %shl = shl i64 %div, 2
@@ -598,10 +597,9 @@
 
 define i64 @test_neg_shl_zext_i1(i1 %a, i64 %b) {
 ; CHECK-LABEL: @test_neg_shl_zext_i1(
-; CHECK-NEXT:    [[EXT:%.*]] = zext i1 [[A:%.*]] to i64
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[EXT]], [[B:%.*]]
-; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[SHL]]
-; CHECK-NEXT:    ret i64 [[NEG]]
+; CHECK-NEXT:    [[EXT_NEG:%.*]] = sext i1 [[A:%.*]] to i64
+; CHECK-NEXT:    [[SHL_NEG:%.*]] = shl i64 [[EXT_NEG]], [[B:%.*]]
+; CHECK-NEXT:    ret i64 [[SHL_NEG]]
 ;
   %ext = zext i1 %a to i64
   %shl = shl i64 %ext, %b
@@ -611,10 +609,9 @@
 
 define i64 @test_neg_shl_sext_i1(i1 %a, i64 %b) {
 ; CHECK-LABEL: @test_neg_shl_sext_i1(
-; CHECK-NEXT:    [[EXT:%.*]] = sext i1 [[A:%.*]] to i64
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[EXT]], [[B:%.*]]
-; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[SHL]]
-; CHECK-NEXT:    ret i64 [[NEG]]
+; CHECK-NEXT:    [[EXT_NEG:%.*]] = zext i1 [[A:%.*]] to i64
+; CHECK-NEXT:    [[SHL_NEG:%.*]] = shl i64 [[EXT_NEG]], [[B:%.*]]
+; CHECK-NEXT:    ret i64 [[SHL_NEG]]
 ;
   %ext = sext i1 %a to i64
   %shl = shl i64 %ext, %b
@@ -625,9 +622,9 @@
 define i64 @test_neg_zext_i1_extra_use(i1 %a, i64 %b, i64* %p) {
 ; CHECK-LABEL: @test_neg_zext_i1_extra_use(
 ; CHECK-NEXT:    [[EXT:%.*]] = zext i1 [[A:%.*]] to i64
-; CHECK-NEXT:    [[NEG:%.*]] = sext i1 [[A]] to i64
+; CHECK-NEXT:    [[EXT_NEG:%.*]] = sext i1 [[A]] to i64
 ; CHECK-NEXT:    store i64 [[EXT]], i64* [[P:%.*]], align 8
-; CHECK-NEXT:    ret i64 [[NEG]]
+; CHECK-NEXT:    ret i64 [[EXT_NEG]]
 ;
   %ext = zext i1 %a to i64
   %neg = sub i64 0, %ext
@@ -638,9 +635,9 @@
 define i64 @test_neg_sext_i1_extra_use(i1 %a, i64 %b, i64* %p) {
 ; CHECK-LABEL: @test_neg_sext_i1_extra_use(
 ; CHECK-NEXT:    [[EXT:%.*]] = sext i1 [[A:%.*]] to i64
-; CHECK-NEXT:    [[NEG:%.*]] = zext i1 [[A]] to i64
+; CHECK-NEXT:    [[EXT_NEG:%.*]] = zext i1 [[A]] to i64
 ; CHECK-NEXT:    store i64 [[EXT]], i64* [[P:%.*]], align 8
-; CHECK-NEXT:    ret i64 [[NEG]]
+; CHECK-NEXT:    ret i64 [[EXT_NEG]]
 ;
   %ext = sext i1 %a to i64
   %neg = sub i64 0, %ext
@@ -650,11 +647,10 @@
 
 define i32 @test_neg_trunc_shl_sub(i64 %a, i64 %b) {
 ; CHECK-LABEL: @test_neg_trunc_shl_sub(
-; CHECK-NEXT:    [[SUB:%.*]] = sub i64 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT:    [[SUB_TR:%.*]] = trunc i64 [[SUB]] to i32
-; CHECK-NEXT:    [[TRUNC:%.*]] = shl i32 [[SUB_TR]], 2
-; CHECK-NEXT:    [[NEG:%.*]] = sub i32 0, [[TRUNC]]
-; CHECK-NEXT:    ret i32 [[NEG]]
+; CHECK-NEXT:    [[SUB_NEG:%.*]] = sub i64 [[B:%.*]], [[A:%.*]]
+; CHECK-NEXT:    [[SUB_TR_NEG:%.*]] = trunc i64 [[SUB_NEG]] to i32
+; CHECK-NEXT:    [[TRUNC_NEG:%.*]] = shl i32 [[SUB_TR_NEG]], 2
+; CHECK-NEXT:    ret i32 [[TRUNC_NEG]]
 ;
   %sub = sub i64 %a, %b
   %shl = shl i64 %sub, 2
@@ -665,11 +661,10 @@
 
 define i32 @test_neg_trunc_shl_ashr(i64 %a, i64 %b) {
 ; CHECK-LABEL: @test_neg_trunc_shl_ashr(
-; CHECK-NEXT:    [[SHR:%.*]] = ashr i64 [[A:%.*]], 63
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[SHR]], [[B:%.*]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[SHL]] to i32
-; CHECK-NEXT:    [[NEG:%.*]] = sub i32 0, [[TRUNC]]
-; CHECK-NEXT:    ret i32 [[NEG]]
+; CHECK-NEXT:    [[SHR_NEG:%.*]] = lshr i64 [[A:%.*]], 63
+; CHECK-NEXT:    [[SHL_NEG:%.*]] = shl i64 [[SHR_NEG]], [[B:%.*]]
+; CHECK-NEXT:    [[TRUNC_NEG:%.*]] = trunc i64 [[SHL_NEG]] to i32
+; CHECK-NEXT:    ret i32 [[TRUNC_NEG]]
 ;
   %shr = ashr i64 %a, 63
   %shl = shl i64 %shr, %b
@@ -680,11 +675,10 @@
 
 define i32 @test_neg_trunc_shl_lshr(i64 %a, i64 %b) {
 ; CHECK-LABEL: @test_neg_trunc_shl_lshr(
-; CHECK-NEXT:    [[SHR:%.*]] = lshr i64 [[A:%.*]], 63
-; CHECK-NEXT:    [[SHL:%.*]] = shl i64 [[SHR]], [[B:%.*]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[SHL]] to i32
-; CHECK-NEXT:    [[NEG:%.*]] = sub i32 0, [[TRUNC]]
-; CHECK-NEXT:    ret i32 [[NEG]]
+; CHECK-NEXT:    [[SHR_NEG:%.*]] = ashr i64 [[A:%.*]], 63
+; CHECK-NEXT:    [[SHL_NEG:%.*]] = shl i64 [[SHR_NEG]], [[B:%.*]]
+; CHECK-NEXT:    [[TRUNC_NEG:%.*]] = trunc i64 [[SHL_NEG]] to i32
+; CHECK-NEXT:    ret i32 [[TRUNC_NEG]]
 ;
   %shr = lshr i64 %a, 63
   %shl = shl i64 %shr, %b
@@ -835,8 +829,8 @@
 
 define i64 @test29(i8* %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test29(
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[I:%.*]], [[J:%.*]]
-; CHECK-NEXT:    ret i64 [[TMP1]]
+; CHECK-NEXT:    [[DOTNEG:%.*]] = sub i64 [[I:%.*]], [[J:%.*]]
+; CHECK-NEXT:    ret i64 [[DOTNEG]]
 ;
   %gep1 = getelementptr inbounds i8, i8* %foo, i64 %i
   %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
@@ -849,8 +843,8 @@
 define i64 @test30(i8* %foo, i64 %i, i64 %j) {
 ; CHECK-LABEL: @test30(
 ; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[GEP1_IDX]], [[J:%.*]]
-; CHECK-NEXT:    ret i64 [[TMP1]]
+; CHECK-NEXT:    [[DOTNEG:%.*]] = sub i64 [[GEP1_IDX]], [[J:%.*]]
+; CHECK-NEXT:    ret i64 [[DOTNEG]]
 ;
   %bit = bitcast i8* %foo to i32*
   %gep1 = getelementptr inbounds i32, i32* %bit, i64 %i
@@ -864,8 +858,8 @@
 define i16 @test30_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
 ; CHECK-LABEL: @test30_as1(
 ; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i16 [[I:%.*]], 2
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i16 [[GEP1_IDX]], [[J:%.*]]
-; CHECK-NEXT:    ret i16 [[TMP1]]
+; CHECK-NEXT:    [[DOTNEG:%.*]] = sub i16 [[GEP1_IDX]], [[J:%.*]]
+; CHECK-NEXT:    ret i16 [[DOTNEG]]
 ;
   %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
   %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i16 %i
@@ -919,8 +913,8 @@
 define <2 x i32> @test37(<2 x i32> %A) {
 ; CHECK-LABEL: @test37(
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <2 x i32> [[A:%.*]], <i32 -2147483648, i32 -2147483648>
-; CHECK-NEXT:    [[SUB:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
-; CHECK-NEXT:    ret <2 x i32> [[SUB]]
+; CHECK-NEXT:    [[DIV_NEG:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[DIV_NEG]]
 ;
   %div = sdiv <2 x i32> %A, <i32 -2147483648, i32 -2147483648>
   %sub = sub nsw <2 x i32> zeroinitializer, %div
@@ -930,8 +924,8 @@
 define i32 @test38(i32 %A) {
 ; CHECK-LABEL: @test38(
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i32 [[A:%.*]], -2147483648
-; CHECK-NEXT:    [[SUB:%.*]] = sext i1 [[TMP1]] to i32
-; CHECK-NEXT:    ret i32 [[SUB]]
+; CHECK-NEXT:    [[DIV_NEG:%.*]] = sext i1 [[TMP1]] to i32
+; CHECK-NEXT:    ret i32 [[DIV_NEG]]
 ;
   %div = sdiv i32 %A, -2147483648
   %sub = sub nsw i32 0, %div
@@ -1246,8 +1240,8 @@
 ; CHECK-LABEL: @test58(
 ; CHECK-NEXT:    [[GEP2_OFFS:%.*]] = add i64 [[J:%.*]], 4200
 ; CHECK-NEXT:    [[GEP1_OFFS:%.*]] = add i64 [[I:%.*]], 4200
-; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[GEP1_OFFS]], [[GEP2_OFFS]]
-; CHECK-NEXT:    ret i64 [[TMP1]]
+; CHECK-NEXT:    [[DOTNEG:%.*]] = sub i64 [[GEP1_OFFS]], [[GEP2_OFFS]]
+; CHECK-NEXT:    ret i64 [[DOTNEG]]
 ;
   %gep1 = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* %foo, i64 0, i64 42, i64 %i
   %gep2 = getelementptr inbounds [100 x [100 x i8]], [100 x [100 x i8]]* %foo, i64 0, i64 42, i64 %j
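
Worked example (illustration only, not part of the patch): the @test_neg_shl_div change above shows the recursion bottoming out in the SDiv case. Assuming the ".neg" value names that freelyNegateValue() assigns (the test only matches them through FileCheck patterns), input of the form

  %div = sdiv i64 %a, 3
  %shl = shl i64 %div, 2
  %neg = sub i64 0, %shl
  ret i64 %neg

is now reduced, per the updated CHECK lines, to

  %div.neg = sdiv i64 %a, -3
  %shl.neg = shl i64 %div.neg, 2
  ret i64 %shl.neg

The "sub 0, %shl" is folded by negating the shl, which recursively negates its left operand by flipping the sign of the sdiv's constant divisor.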