Index: llvm/include/llvm/Analysis/ValueTracking.h
===================================================================
--- llvm/include/llvm/Analysis/ValueTracking.h
+++ llvm/include/llvm/Analysis/ValueTracking.h
@@ -32,6 +32,7 @@
 class DominatorTree;
 class GEPOperator;
 class IntrinsicInst;
+class WithOverflowInst;
 struct KnownBits;
 class Loop;
 class LoopInfo;
@@ -454,10 +455,10 @@
                                              const Instruction *CxtI,
                                              const DominatorTree *DT);
 
-  /// Returns true if the arithmetic part of the \p II 's result is
+  /// Returns true if the arithmetic part of the \p WO 's result is
   /// used only along the paths control dependent on the computation
-  /// not overflowing, \p II being an <op>.with.overflow intrinsic.
-  bool isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
+  /// not overflowing, \p WO being an <op>.with.overflow intrinsic.
+  bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                  const DominatorTree &DT);
 
Index: llvm/include/llvm/IR/IntrinsicInst.h
===================================================================
--- llvm/include/llvm/IR/IntrinsicInst.h
+++ llvm/include/llvm/IR/IntrinsicInst.h
@@ -265,6 +265,39 @@
     }
   };
 
+  /// This class represents an op.with.overflow intrinsic.
+  class WithOverflowInst : public IntrinsicInst {
+  public:
+    static bool classof(const IntrinsicInst *I) {
+      switch (I->getIntrinsicID()) {
+      case Intrinsic::uadd_with_overflow:
+      case Intrinsic::sadd_with_overflow:
+      case Intrinsic::usub_with_overflow:
+      case Intrinsic::ssub_with_overflow:
+      case Intrinsic::umul_with_overflow:
+      case Intrinsic::smul_with_overflow:
+        return true;
+      default:
+        return false;
+      }
+    }
+    static bool classof(const Value *V) {
+      return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+    }
+
+    Value *getLHS() const { return const_cast<Value *>(getArgOperand(0)); }
+    Value *getRHS() const { return const_cast<Value *>(getArgOperand(1)); }
+
+    /// Returns the binary operation underlying the intrinsic.
+    Instruction::BinaryOps getBinaryOp() const;
+
+    /// Whether the intrinsic is signed or unsigned.
+    bool isSigned() const;
+
+    /// Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
+    unsigned getNoWrapKind() const;
+  };
+
   /// Common base class for all memory intrinsics. Simply provides
   /// common methods.
   /// Written as CRTP to avoid a common base class amongst the
Index: llvm/lib/Analysis/ScalarEvolution.cpp
===================================================================
--- llvm/lib/Analysis/ScalarEvolution.cpp
+++ llvm/lib/Analysis/ScalarEvolution.cpp
@@ -4575,52 +4575,21 @@
     if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;
 
-    auto *CI = dyn_cast<CallInst>(EVI->getAggregateOperand());
-    if (!CI)
+    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
+    if (!WO)
       break;
 
-    if (auto *F = CI->getCalledFunction())
-      switch (F->getIntrinsicID()) {
-      case Intrinsic::sadd_with_overflow:
-      case Intrinsic::uadd_with_overflow:
-        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
-          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
-                          CI->getArgOperand(1));
-
-        // Now that we know that all uses of the arithmetic-result component of
-        // CI are guarded by the overflow check, we can go ahead and pretend
-        // that the arithmetic is non-overflowing.
-        if (F->getIntrinsicID() == Intrinsic::sadd_with_overflow)
-          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
-                          CI->getArgOperand(1), /* IsNSW = */ true,
-                          /* IsNUW = */ false);
-        else
-          return BinaryOp(Instruction::Add, CI->getArgOperand(0),
-                          CI->getArgOperand(1), /* IsNSW = */ false,
-                          /* IsNUW*/ true);
-      case Intrinsic::ssub_with_overflow:
-      case Intrinsic::usub_with_overflow:
-        if (!isOverflowIntrinsicNoWrap(cast<IntrinsicInst>(CI), DT))
-          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
-                          CI->getArgOperand(1));
-
-        // The same reasoning as sadd/uadd above.
-        if (F->getIntrinsicID() == Intrinsic::ssub_with_overflow)
-          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
-                          CI->getArgOperand(1), /* IsNSW = */ true,
-                          /* IsNUW = */ false);
-        else
-          return BinaryOp(Instruction::Sub, CI->getArgOperand(0),
-                          CI->getArgOperand(1), /* IsNSW = */ false,
-                          /* IsNUW = */ true);
-      case Intrinsic::smul_with_overflow:
-      case Intrinsic::umul_with_overflow:
-        return BinaryOp(Instruction::Mul, CI->getArgOperand(0),
-                        CI->getArgOperand(1));
-      default:
-        break;
-      }
-    break;
+    Instruction::BinaryOps BinOp = WO->getBinaryOp();
+    bool Signed = WO->isSigned();
+    // TODO: Should add nuw/nsw flags for mul as well.
+    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
+      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
+
+    // Now that we know that all uses of the arithmetic-result component of
+    // CI are guarded by the overflow check, we can go ahead and pretend
+    // that the arithmetic is non-overflowing.
+    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
+                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
   }
 
   default:
Index: llvm/lib/Analysis/ValueTracking.cpp
===================================================================
--- llvm/lib/Analysis/ValueTracking.cpp
+++ llvm/lib/Analysis/ValueTracking.cpp
@@ -4206,23 +4206,12 @@
   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
 }
 
-bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
+bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                      const DominatorTree &DT) {
-#ifndef NDEBUG
-  auto IID = II->getIntrinsicID();
-  assert((IID == Intrinsic::sadd_with_overflow ||
-          IID == Intrinsic::uadd_with_overflow ||
-          IID == Intrinsic::ssub_with_overflow ||
-          IID == Intrinsic::usub_with_overflow ||
-          IID == Intrinsic::smul_with_overflow ||
-          IID == Intrinsic::umul_with_overflow) &&
-         "Not an overflow intrinsic!");
-#endif
-
   SmallVector<const BranchInst *, 2> GuardingBranches;
   SmallVector<const ExtractValueInst *, 2> Results;
 
-  for (const User *U : II->users()) {
+  for (const User *U : WO->users()) {
     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
Index: llvm/lib/IR/IntrinsicInst.cpp
===================================================================
--- llvm/lib/IR/IntrinsicInst.cpp
+++ llvm/lib/IR/IntrinsicInst.cpp
@@ -21,6 +21,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Operator.h"
 #include "llvm/ADT/StringSwitch.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DebugInfoMetadata.h"
@@ -168,3 +169,36 @@
   }
 }
 
+Instruction::BinaryOps WithOverflowInst::getBinaryOp() const {
+  switch (getIntrinsicID()) {
+  case Intrinsic::uadd_with_overflow:
+  case Intrinsic::sadd_with_overflow:
+    return Instruction::Add;
+  case Intrinsic::usub_with_overflow:
+  case Intrinsic::ssub_with_overflow:
+    return Instruction::Sub;
+  case Intrinsic::umul_with_overflow:
+  case Intrinsic::smul_with_overflow:
+    return Instruction::Mul;
+  default:
+    llvm_unreachable("Invalid intrinsic");
+  }
+}
+
+bool WithOverflowInst::isSigned() const {
+  switch (getIntrinsicID()) {
+  case Intrinsic::sadd_with_overflow:
+  case Intrinsic::ssub_with_overflow:
+  case Intrinsic::smul_with_overflow:
+    return true;
+  default:
+    return false;
+  }
+}
+
+unsigned WithOverflowInst::getNoWrapKind() const {
+  if (isSigned())
+    return OverflowingBinaryOperator::NoSignedWrap;
+  else
+    return OverflowingBinaryOperator::NoUnsignedWrap;
+}
Index: llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2664,53 +2664,28 @@
       return ExtractValueInst::Create(IV->getInsertedValueOperand(),
                                       makeArrayRef(exti, exte));
   }
-  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
-    // We're extracting from an intrinsic, see if we're the only user, which
-    // allows us to simplify multiple result intrinsics to simpler things that
-    // just get one value.
-    if (II->hasOneUse()) {
-      // Check if we're grabbing the overflow bit or the result of a 'with
-      // overflow' intrinsic. If it's the latter we can remove the intrinsic
+  if (WithOverflowInst *WO = dyn_cast<WithOverflowInst>(Agg)) {
+    // We're extracting from an overflow intrinsic, see if we're the only user,
+    // which allows us to simplify multiple result intrinsics to simpler
+    // things that just get one value.
+    if (WO->hasOneUse()) {
+      // Check if we're grabbing only the result of a 'with overflow' intrinsic
       // and replace it with a traditional binary instruction.
-      switch (II->getIntrinsicID()) {
-      case Intrinsic::uadd_with_overflow:
-      case Intrinsic::sadd_with_overflow:
-        if (*EV.idx_begin() == 0) { // Normal result.
-          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
-          replaceInstUsesWith(*II, UndefValue::get(II->getType()));
-          eraseInstFromFunction(*II);
-          return BinaryOperator::CreateAdd(LHS, RHS);
-        }
-
-        // If the normal result of the add is dead, and the RHS is a constant,
-        // we can transform this into a range comparison.
-        // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
-        if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
-          if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
-            return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
-                                ConstantExpr::getNot(CI));
-        break;
-      case Intrinsic::usub_with_overflow:
-      case Intrinsic::ssub_with_overflow:
-        if (*EV.idx_begin() == 0) { // Normal result.
-          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
-          replaceInstUsesWith(*II, UndefValue::get(II->getType()));
-          eraseInstFromFunction(*II);
-          return BinaryOperator::CreateSub(LHS, RHS);
-        }
-        break;
-      case Intrinsic::umul_with_overflow:
-      case Intrinsic::smul_with_overflow:
-        if (*EV.idx_begin() == 0) { // Normal result.
-          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
-          replaceInstUsesWith(*II, UndefValue::get(II->getType()));
-          eraseInstFromFunction(*II);
-          return BinaryOperator::CreateMul(LHS, RHS);
-        }
-        break;
-      default:
-        break;
+      if (*EV.idx_begin() == 0) {
+        Instruction::BinaryOps BinOp = WO->getBinaryOp();
+        Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
+        replaceInstUsesWith(*WO, UndefValue::get(WO->getType()));
+        eraseInstFromFunction(*WO);
+        return BinaryOperator::Create(BinOp, LHS, RHS);
       }
+
+      // If the normal result of the add is dead, and the RHS is a constant,
+      // we can transform this into a range comparison.
+      // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
+      if (WO->getIntrinsicID() == Intrinsic::uadd_with_overflow)
+        if (ConstantInt *CI = dyn_cast<ConstantInt>(WO->getRHS()))
+          return new ICmpInst(ICmpInst::ICMP_UGT, WO->getLHS(),
+                              ConstantExpr::getNot(CI));
     }
   }
   if (LoadInst *L = dyn_cast<LoadInst>(Agg))
Index: llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -399,59 +399,38 @@
 }
 
 // See if we can prove that the given overflow intrinsic will not overflow.
-static bool willNotOverflow(IntrinsicInst *II, LazyValueInfo *LVI) {
-  using OBO = OverflowingBinaryOperator;
-  auto NoWrap = [&] (Instruction::BinaryOps BinOp, unsigned NoWrapKind) {
-    Value *RHS = II->getOperand(1);
-    ConstantRange RRange = LVI->getConstantRange(RHS, II->getParent(), II);
-    ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
-        BinOp, RRange, NoWrapKind);
-    // As an optimization, do not compute LRange if we do not need it.
-    if (NWRegion.isEmptySet())
-      return false;
-    Value *LHS = II->getOperand(0);
-    ConstantRange LRange = LVI->getConstantRange(LHS, II->getParent(), II);
-    return NWRegion.contains(LRange);
-  };
-  switch (II->getIntrinsicID()) {
-  default:
-    break;
-  case Intrinsic::uadd_with_overflow:
-    return NoWrap(Instruction::Add, OBO::NoUnsignedWrap);
-  case Intrinsic::sadd_with_overflow:
-    return NoWrap(Instruction::Add, OBO::NoSignedWrap);
-  case Intrinsic::usub_with_overflow:
-    return NoWrap(Instruction::Sub, OBO::NoUnsignedWrap);
-  case Intrinsic::ssub_with_overflow:
-    return NoWrap(Instruction::Sub, OBO::NoSignedWrap);
-  }
-  return false;
+static bool willNotOverflow(WithOverflowInst *WO, LazyValueInfo *LVI) {
+  // TODO: Also support multiplication.
+  Instruction::BinaryOps BinOp = WO->getBinaryOp();
+  if (BinOp == Instruction::Mul)
+    return false;
+
+  Value *RHS = WO->getRHS();
+  ConstantRange RRange = LVI->getConstantRange(RHS, WO->getParent(), WO);
+  ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
+      BinOp, RRange, WO->getNoWrapKind());
+  // As an optimization, do not compute LRange if we do not need it.
+  if (NWRegion.isEmptySet())
+    return false;
+  Value *LHS = WO->getLHS();
+  ConstantRange LRange = LVI->getConstantRange(LHS, WO->getParent(), WO);
+  return NWRegion.contains(LRange);
 }
 
-static void processOverflowIntrinsic(IntrinsicInst *II) {
-  IRBuilder<> B(II);
-  Value *NewOp = nullptr;
-  switch (II->getIntrinsicID()) {
-  default:
-    llvm_unreachable("Unexpected instruction.");
-  case Intrinsic::uadd_with_overflow:
-    NewOp = B.CreateNUWAdd(II->getOperand(0), II->getOperand(1), II->getName());
-    break;
-  case Intrinsic::sadd_with_overflow:
-    NewOp = B.CreateNSWAdd(II->getOperand(0), II->getOperand(1), II->getName());
-    break;
-  case Intrinsic::usub_with_overflow:
-    NewOp = B.CreateNUWSub(II->getOperand(0), II->getOperand(1), II->getName());
-    break;
-  case Intrinsic::ssub_with_overflow:
-    NewOp = B.CreateNSWSub(II->getOperand(0), II->getOperand(1), II->getName());
-    break;
-  }
+static void processOverflowIntrinsic(WithOverflowInst *WO) {
+  IRBuilder<> B(WO);
+  Value *NewOp = B.CreateBinOp(
+      WO->getBinaryOp(), WO->getLHS(), WO->getRHS(), WO->getName());
+  if (WO->isSigned())
+    cast<Instruction>(NewOp)->setHasNoSignedWrap();
+  else
+    cast<Instruction>(NewOp)->setHasNoUnsignedWrap();
+
+  Value *NewI = B.CreateInsertValue(UndefValue::get(WO->getType()), NewOp, 0);
+  NewI = B.CreateInsertValue(NewI, ConstantInt::getFalse(WO->getContext()), 1);
+  WO->replaceAllUsesWith(NewI);
+  WO->eraseFromParent();
   ++NumOverflows;
-  Value *NewI = B.CreateInsertValue(UndefValue::get(II->getType()), NewOp, 0);
-  NewI = B.CreateInsertValue(NewI, ConstantInt::getFalse(II->getContext()), 1);
-  II->replaceAllUsesWith(NewI);
-  II->eraseFromParent();
 }
 
 /// Infer nonnull attributes for the arguments at the specified callsite.
@@ -459,9 +438,9 @@
   SmallVector<unsigned, 4> ArgNos;
   unsigned ArgNo = 0;
 
-  if (auto *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
-    if (willNotOverflow(II, LVI)) {
-      processOverflowIntrinsic(II);
+  if (auto *WO = dyn_cast<WithOverflowInst>(CS.getInstruction())) {
+    if (willNotOverflow(WO, LVI)) {
+      processOverflowIntrinsic(WO);
       return true;
     }
   }
Index: llvm/lib/Transforms/Scalar/GVN.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/GVN.cpp
+++ llvm/lib/Transforms/Scalar/GVN.cpp
@@ -329,36 +329,15 @@
   e.type = EI->getType();
   e.opcode = 0;
 
-  IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
-  if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) {
-    // EI might be an extract from one of our recognised intrinsics. If it
-    // is we'll synthesize a semantically equivalent expression instead on
-    // an extract value expression.
-    switch (I->getIntrinsicID()) {
-      case Intrinsic::sadd_with_overflow:
-      case Intrinsic::uadd_with_overflow:
-        e.opcode = Instruction::Add;
-        break;
-      case Intrinsic::ssub_with_overflow:
-      case Intrinsic::usub_with_overflow:
-        e.opcode = Instruction::Sub;
-        break;
-      case Intrinsic::smul_with_overflow:
-      case Intrinsic::umul_with_overflow:
-        e.opcode = Instruction::Mul;
-        break;
-      default:
-        break;
-    }
-
-    if (e.opcode != 0) {
-      // Intrinsic recognized. Grab its args to finish building the expression.
-      assert(I->getNumArgOperands() == 2 &&
-             "Expect two args for recognised intrinsics.");
-      e.varargs.push_back(lookupOrAdd(I->getArgOperand(0)));
-      e.varargs.push_back(lookupOrAdd(I->getArgOperand(1)));
-      return e;
-    }
+  WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
+  if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
+    // EI is an extract from one of our with.overflow intrinsics. Synthesize
+    // a semantically equivalent expression instead of an extract value
+    // expression.
+    e.opcode = WO->getBinaryOp();
+    e.varargs.push_back(lookupOrAdd(WO->getLHS()));
+    e.varargs.push_back(lookupOrAdd(WO->getRHS()));
+    return e;
   }
 
   // Not a recognised intrinsic. Fall back to producing an extract value
Index: llvm/lib/Transforms/Scalar/NewGVN.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -1814,39 +1814,13 @@
 const Expression *
 NewGVN::performSymbolicAggrValueEvaluation(Instruction *I) const {
   if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
-    auto *II = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
-    if (II && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
-      unsigned Opcode = 0;
-      // EI might be an extract from one of our recognised intrinsics. If it
-      // is we'll synthesize a semantically equivalent expression instead on
-      // an extract value expression.
-      switch (II->getIntrinsicID()) {
-      case Intrinsic::sadd_with_overflow:
-      case Intrinsic::uadd_with_overflow:
-        Opcode = Instruction::Add;
-        break;
-      case Intrinsic::ssub_with_overflow:
-      case Intrinsic::usub_with_overflow:
-        Opcode = Instruction::Sub;
-        break;
-      case Intrinsic::smul_with_overflow:
-      case Intrinsic::umul_with_overflow:
-        Opcode = Instruction::Mul;
-        break;
-      default:
-        break;
-      }
-
-      if (Opcode != 0) {
-        // Intrinsic recognized. Grab its args to finish building the
-        // expression.
-        assert(II->getNumArgOperands() == 2 &&
-               "Expect two args for recognised intrinsics.");
-        return createBinaryExpression(Opcode, EI->getType(),
-                                      II->getArgOperand(0),
-                                      II->getArgOperand(1), I);
-      }
-    }
+    auto *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
+    if (WO && EI->getNumIndices() == 1 && *EI->idx_begin() == 0)
+      // EI is an extract from one of our with.overflow intrinsics. Synthesize
+      // a semantically equivalent expression instead of an extract value
+      // expression.
+      return createBinaryExpression(WO->getBinaryOp(), EI->getType(),
+                                    WO->getLHS(), WO->getRHS(), I);
   }
 
   return createAggregateValueExpression(I);
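
Illustrative notes (not part of the patch):

The IntrinsicInst.h hunk adds WithOverflowInst so that callers can replace per-intrinsic switches with a single dyn_cast plus the getBinaryOp/getLHS/getRHS/isSigned/getNoWrapKind accessors. A minimal sketch of the resulting client pattern, using only that API plus standard IRBuilder calls; the helper function itself is made up for illustration and is not part of the patch:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Hypothetical helper: given an op.with.overflow call whose overflow bit is
// known to be irrelevant, build the equivalent plain binary operator and tag
// it with the matching no-wrap flag.
static Value *buildNoWrapBinOp(WithOverflowInst *WO, IRBuilder<> &B) {
  Value *Op = B.CreateBinOp(WO->getBinaryOp(), WO->getLHS(), WO->getRHS(),
                            WO->getName());
  // CreateBinOp may constant-fold, so only set flags on a real instruction.
  if (auto *BO = dyn_cast<BinaryOperator>(Op)) {
    if (WO->isSigned())
      BO->setHasNoSignedWrap();
    else
      BO->setHasNoUnsignedWrap();
  }
  return Op;
}

This is essentially the shape processOverflowIntrinsic takes after the CorrelatedValuePropagation change, minus the insertvalue reconstruction of the {result, overflow} aggregate.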
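
The ValueTracking/ScalarEvolution hunks are about the guarded form of these intrinsics: when every use of the arithmetic half of the result is control dependent on the overflow bit being false, SCEV may treat the operation as nsw/nuw. The following self-contained sketch builds exactly that guarded shape with IRBuilder, so the comments above have something concrete to point at; the function and block names are arbitrary and only chosen for the example:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("guarded", Ctx);
  Type *I32 = Type::getInt32Ty(Ctx);
  Function *F =
      Function::Create(FunctionType::get(I32, {I32, I32}, false),
                       Function::ExternalLinkage, "guarded_add", &M);
  auto ArgIt = F->arg_begin();
  Value *X = &*ArgIt++, *Y = &*ArgIt;

  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
  BasicBlock *Cont = BasicBlock::Create(Ctx, "cont", F);
  BasicBlock *Trap = BasicBlock::Create(Ctx, "trap", F);

  IRBuilder<> B(Entry);
  Function *SAdd =
      Intrinsic::getDeclaration(&M, Intrinsic::sadd_with_overflow, {I32});
  Value *Pair = B.CreateCall(SAdd, {X, Y}, "pair");
  Value *Ov = B.CreateExtractValue(Pair, 1, "ov");
  B.CreateCondBr(Ov, Trap, Cont); // the overflow bit guards the result

  B.SetInsertPoint(Cont);
  // The arithmetic half is only used on the no-overflow path, which is the
  // condition isOverflowIntrinsicNoWrap() checks before SCEV pretends the
  // addition is nsw.
  Value *Sum = B.CreateExtractValue(Pair, 0, "sum");
  B.CreateRet(Sum);

  B.SetInsertPoint(Trap);
  B.CreateUnreachable();

  M.print(outs(), nullptr);
}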
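
The InstCombine hunk keeps the old special case for the overflow bit of uadd.with.overflow with a constant operand: when the arithmetic result is dead, "overflow = uadd a, C" becomes "icmp ugt a, ~C" (the -4/3 pair in the comment). The identity is that unsigned a + C wraps exactly when a > ~C; a throwaway exhaustive check over 8-bit values, just to spell that out:

#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t C = 0xFC;  // the -4 from the comment, as an unsigned byte
  const uint8_t NotC = ~C; // 0x03
  for (unsigned A = 0; A <= 0xFF; ++A) {
    // Does the 8-bit unsigned addition A + C wrap around?
    bool Overflow = unsigned(uint8_t(A + C)) != A + C;
    // The rewritten form: icmp ugt A, ~C.
    bool Icmp = A > NotC;
    if (Overflow != Icmp) {
      std::printf("mismatch at A=%u\n", A);
      return 1;
    }
  }
  std::printf("overflow(A + %u) == (A > %u) for every 8-bit A\n",
              unsigned(C), unsigned(NotC));
  return 0;
}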
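
The CorrelatedValuePropagation change funnels every supported case through one ConstantRange query: makeGuaranteedNoWrapRegion(BinOp, RRange, NoWrapKind) returns the set of left-hand values for which the operation cannot wrap for any right-hand value in RRange, and willNotOverflow then just checks that the LHS range is contained in it. A standalone illustration of that containment test with hand-picked ranges (in the pass the ranges come from LazyValueInfo):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  // Pretend LVI proved: RHS is in [0, 10) and LHS is in [0, 100), both i8.
  ConstantRange RRange(APInt(8, 0), APInt(8, 10));
  ConstantRange LRange(APInt(8, 0), APInt(8, 100));

  // All LHS values for which "LHS + RHS" cannot wrap unsigned, for every RHS
  // in RRange; with RHS <= 9 this is [0, 247), since 246 + 9 = 255 still fits.
  ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
      Instruction::Add, RRange, OverflowingBinaryOperator::NoUnsignedWrap);

  // The same containment check willNotOverflow() performs.
  outs() << "uadd.with.overflow provably does not overflow: "
         << (NWRegion.contains(LRange) ? "yes" : "no") << "\n";
  return 0;
}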