Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -957,59 +957,64 @@
 }
 
 // Checks if any operand is negative and we can convert add to sub.
-// This function checks for following negative patterns
-//   ADD(XOR(OR(Z, NOT(C)), C)), 1) == NEG(AND(Z, C))
-// TODO: ADD(XOR(AND(Z, ~C), ~C), 1) == NEG(OR(Z, C)) if C is even
-// TODO: XOR(AND(Z, ~C), (~C + 1)) == NEG(OR(Z, C)) if C is odd
-Value *checkForNegativeOperand(BinaryOperator &I,
-                               InstCombiner::BuilderTy *Builder) {
-  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
-
-  // This function creates 2 instructions to replace ADD, we need at least one
-  // of LHS or RHS to have one use to ensure benefit in transform.
-  if (!LHS->hasOneUse() && !RHS->hasOneUse())
-    return nullptr;
-
-  bool IHasNSW = I.hasNoSignedWrap();
-  bool IHasNUW = I.hasNoUnsignedWrap();
-
-  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
-  const APInt *C1 = nullptr, *C2 = nullptr;
-
-  // if ONE is on other side, swap
-  if (match(RHS, m_Add(m_Value(X), m_One())))
-    std::swap(LHS, RHS);
-
-  if (match(LHS, m_Add(m_Value(X), m_One()))) {
-    // if XOR on other side, swap
-    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
-      std::swap(X, RHS);
-
-    // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
-    // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
-    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
-      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
-        Value *NewAnd = Builder->CreateAnd(Z, *C1);
-        return Builder->CreateSub(RHS, NewAnd, "", IHasNUW, IHasNSW);
-      }
-    }
-  }
-
-  return nullptr;
-}
-
-Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
-  bool Changed = SimplifyAssociativeOrCommutative(I);
-  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+  // This function checks for following negative patterns
+  //   ADD(XOR(OR(Z, NOT(C)), C)), 1) == NEG(AND(Z, C))
+  //   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C)) if C is odd
+  // TODO: XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
+  Value *checkForNegativeOperand(BinaryOperator &I,
+                                 InstCombiner::BuilderTy *Builder) {
+    Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+
+    // This function creates 2 instructions to replace ADD, we need at least one
+    // of LHS or RHS to have one use to ensure benefit in transform.
+    if (!LHS->hasOneUse() && !RHS->hasOneUse())
+      return nullptr;
+
+    Value *X = nullptr, *Y = nullptr, *Z = nullptr;
+    const APInt *C1 = nullptr, *C2 = nullptr;
+
+    // if ONE is on other side, swap
+    if (match(RHS, m_Add(m_Value(X), m_One())))
+      std::swap(LHS, RHS);
+
+    if (match(LHS, m_Add(m_Value(X), m_One()))) {
+      // if XOR on other side, swap
+      if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
+        std::swap(X, RHS);
+
+      if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
+        // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
+        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
+        if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
+          Value *NewAnd = Builder->CreateAnd(Z, *C1);
+          return Builder->CreateSub(RHS, NewAnd, "sub");
+        } else if (C1->countTrailingZeros() == 0) {
+          // if C1 is ODD and
+          // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
+          // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
+          if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
+            Value *NewOr = Builder->CreateOr(Z, ~(*C1));
+            return Builder->CreateSub(RHS, NewOr, "sub");
+          }
+        }
+      }
+    }
 
-  if (Value *V = SimplifyVectorOp(I))
-    return ReplaceInstUsesWith(I, V);
+    return nullptr;
+  }
 
-  if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
-                                 I.hasNoUnsignedWrap(), DL))
-    return ReplaceInstUsesWith(I, V);
+  Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
+    bool Changed = SimplifyAssociativeOrCommutative(I);
+    Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
+
+    if (Value *V = SimplifyVectorOp(I))
+      return ReplaceInstUsesWith(I, V);
+
+    if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
+                                   I.hasNoUnsignedWrap(), DL))
+      return ReplaceInstUsesWith(I, V);
 
-  // (A*B)+(A*C) -> A*(B+C) etc
+  // (A*B)+(A*C) -> A*(B+C) etc
   if (Value *V = SimplifyUsingDistributiveLaws(I))
     return ReplaceInstUsesWith(I, V);
 
Index: llvm/trunk/test/Transforms/InstCombine/add2.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/add2.ll
+++ llvm/trunk/test/Transforms/InstCombine/add2.ll
@@ -150,6 +150,32 @@
 ; CHECK-NEXT: ret i32 [[SUB]]
 }
 
+define i32 @test15(i32 %x, i32 %y) {
+  %x.not = and i32 %x, -1431655767
+  %neg = xor i32 %x.not, -1431655767
+  %add = add i32 %y, 1
+  %add1 = add i32 %add, %neg
+  ret i32 %add1
+; CHECK-LABEL: @test15(
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 %x, 1431655766
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[OR]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
+define i32 @test16(i32 %x, i32 %y) {
+  %shr = ashr i32 %x, 3
+  %shr.not = and i32 %shr, -1431655767
+  %neg = xor i32 %shr.not, -1431655767
+  %add = add i32 %y, 1
+  %add1 = add i32 %add, %neg
+  ret i32 %add1
+; CHECK-LABEL: @test16(
+; CHECK-NEXT: [[SHR:%[a-z0-9]+]] = ashr i32 %x, 3
+; CHECK-NEXT: [[OR:%[a-z0-9]+]] = or i32 [[SHR]], 1431655766
+; CHECK-NEXT: [[SUB:%[a-z0-9]+]] = sub i32 %y, [[OR]]
+; CHECK-NEXT: ret i32 [[SUB]]
+}
+
 define i16 @add_nsw_mul_nsw(i16 %x) {
   %add1 = add nsw i16 %x, %x
   %add2 = add nsw i16 %add1, %x
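
A quick way to convince yourself of the two identities this patch matches is to brute-force them over a small bit width. The sketch below is not part of the patch: it is a standalone C++ program with file, constant, and variable names of my own choosing, using plain 16-bit unsigned arithmetic in place of APInt.

// Standalone sanity check (not part of the patch; hypothetical file
// negative_operand_check.cpp): verifies the bit identities behind
// checkForNegativeOperand over all 16-bit values of Z for a few constants.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  // 0xAAA9 mirrors the odd constant used by @test15/@test16 (0xAAAAAAA9).
  const uint16_t Constants[] = {0xAAA9, 0x00FF, 0x1234, 0x1235, 0xFFFF};
  for (uint16_t C : Constants) {
    for (uint32_t ZW = 0; ZW <= 0xFFFF; ++ZW) {
      const uint16_t Z = static_cast<uint16_t>(ZW);
      const uint16_t NotC = static_cast<uint16_t>(~C);

      // Pattern 1: ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C)).
      const uint16_t Lhs1 = static_cast<uint16_t>(((Z | NotC) ^ C) + 1);
      const uint16_t Rhs1 = static_cast<uint16_t>(0u - (Z & C));
      assert(Lhs1 == Rhs1);

      // Pattern 2: ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C)).
      // The identity itself holds for any C; the patch only matches this
      // shape when C is odd (countTrailingZeros() == 0), since for even C
      // the "+ 1" can fold into the xor constant, giving the TODO form
      // XOR(AND(Z, C), (C + 1)).
      const uint16_t Lhs2 = static_cast<uint16_t>(((Z & C) ^ C) + 1);
      const uint16_t Rhs2 = static_cast<uint16_t>(0u - (Z | NotC));
      assert(Lhs2 == Rhs2);
    }
  }
  std::puts("negative-operand identities hold for all sampled constants");
  return 0;
}

In @test15, this is the arithmetic behind the expected output: -1431655767 is 0xAAAAAAA9 (odd), its bitwise complement is 1431655766, so the xor/add chain becomes "sub i32 %y, (or i32 %x, 1431655766)" as the CHECK lines require.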