Index: docs/LangRef.rst =================================================================== --- docs/LangRef.rst +++ docs/LangRef.rst @@ -6580,6 +6580,7 @@ <result> = udiv <ty> <op1>, <op2> ; yields ty:result <result> = udiv exact <ty> <op1>, <op2> ; yields ty:result + <result> = udiv nof <ty> <op1>, <op2> ; yields ty:result Overview: """"""""" @@ -6601,14 +6602,17 @@ Note that unsigned integer division and signed integer division are distinct operations; for signed integer division, use '``sdiv``'. -Division by zero is undefined behavior. For vectors, if any element -of the divisor is zero, the operation has undefined behavior. +See the description of the ``nof`` keyword below for division by zero. If the ``exact`` keyword is present, the result value of the ``udiv`` is a :ref:`poison value <poisonvalues>` if %op1 is not a multiple of %op2 (as such, "((a udiv exact b) mul b) == a"). +``nof`` stands for "No Overflow". If the ``nof`` keyword is present, division by zero is undefined behavior. +If the ``nof`` keyword is not present, division by zero results in a poison value. +For vectors, if any element of the divisor is zero, the behavior is the same as for scalar division by zero. + Example: """""""" @@ -6626,6 +6630,7 @@ <result> = sdiv <ty> <op1>, <op2> ; yields ty:result <result> = sdiv exact <ty> <op1>, <op2> ; yields ty:result + <result> = sdiv nof <ty> <op1>, <op2> ; yields ty:result Overview: """"""""" @@ -6648,14 +6653,15 @@ Note that signed integer division and unsigned integer division are distinct operations; for unsigned integer division, use '``udiv``'. -Division by zero is undefined behavior. For vectors, if any element -of the divisor is zero, the operation has undefined behavior. -Overflow also leads to undefined behavior; this is a rare case, but can -occur, for example, by doing a 32-bit division of -2147483648 by -1. +See the description of the ``nof`` keyword below for division by zero and overflow. 
If the ``exact`` keyword is present, the result value of the ``sdiv`` is a :ref:`poison value <poisonvalues>` if the result would be rounded. +``nof`` stands for "No Overflow". If the ``nof`` keyword is present, the behavior is undefined if overflow occurs. This may be the result of division by zero or dividing the smallest representable integer of the type by -1. +If the ``nof`` keyword is not present, the overflow cases described above result in a poison value. +For vectors, if any element of the division causes overflow, the behavior is the same as for scalar division with overflow. + Example: """""""" Index: include/llvm/Analysis/TargetFolder.h =================================================================== --- include/llvm/Analysis/TargetFolder.h +++ include/llvm/Analysis/TargetFolder.h @@ -67,11 +67,13 @@ Constant *CreateFMul(Constant *LHS, Constant *RHS) const { return Fold(ConstantExpr::getFMul(LHS, RHS)); } - Constant *CreateUDiv(Constant *LHS, Constant *RHS, bool isExact = false)const{ - return Fold(ConstantExpr::getUDiv(LHS, RHS, isExact)); - } - Constant *CreateSDiv(Constant *LHS, Constant *RHS, bool isExact = false)const{ - return Fold(ConstantExpr::getSDiv(LHS, RHS, isExact)); + Constant *CreateUDiv(Constant *LHS, Constant *RHS, bool isExact = false, + bool isNoOverflow = true) const { + return Fold(ConstantExpr::getUDiv(LHS, RHS, isExact, isNoOverflow)); + } + Constant *CreateSDiv(Constant *LHS, Constant *RHS, bool isExact = false, + bool isNoOverflow = true) const { + return Fold(ConstantExpr::getSDiv(LHS, RHS, isExact, isNoOverflow)); } Constant *CreateFDiv(Constant *LHS, Constant *RHS) const { return Fold(ConstantExpr::getFDiv(LHS, RHS)); Index: include/llvm/Analysis/TargetTransformInfo.h =================================================================== --- include/llvm/Analysis/TargetTransformInfo.h +++ include/llvm/Analysis/TargetTransformInfo.h @@ -482,6 +482,11 @@ bool isLegalMaskedScatter(Type *DataType) const; bool 
isLegalMaskedGather(Type *DataType) const; + /// \brief Return true if the target supports div that may overflow or + /// divide by zero without causing a side effect + bool isLegalMayOverflowUDiv(Type *DataType) const; + bool isLegalMayOverflowSDiv(Type *DataType) const; + /// Return true if the target has a unified operation to calculate division /// and remainder. If so, the additional implicit multiplication and /// subtraction required to calculate a remainder from division are free. This @@ -978,6 +983,8 @@ virtual bool isLegalMaskedLoad(Type *DataType) = 0; virtual bool isLegalMaskedScatter(Type *DataType) = 0; virtual bool isLegalMaskedGather(Type *DataType) = 0; + virtual bool isLegalMayOverflowUDiv(Type *DataType) = 0; + virtual bool isLegalMayOverflowSDiv(Type *DataType) = 0; virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0; virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0; virtual bool prefersVectorizedAddressing() = 0; @@ -1204,6 +1211,12 @@ bool isLegalMaskedGather(Type *DataType) override { return Impl.isLegalMaskedGather(DataType); } + bool isLegalMayOverflowUDiv(Type *DataType) override { + return Impl.isLegalMayOverflowUDiv(DataType); + } + bool isLegalMayOverflowSDiv(Type *DataType) override { + return Impl.isLegalMayOverflowSDiv(DataType); + } bool hasDivRemOp(Type *DataType, bool IsSigned) override { return Impl.hasDivRemOp(DataType, IsSigned); } Index: include/llvm/Analysis/TargetTransformInfoImpl.h =================================================================== --- include/llvm/Analysis/TargetTransformInfoImpl.h +++ include/llvm/Analysis/TargetTransformInfoImpl.h @@ -254,6 +254,10 @@ bool isLegalMaskedGather(Type *DataType) { return false; } + bool isLegalMayOverflowUDiv(Type *DataType) { return false; } + + bool isLegalMayOverflowSDiv(Type *DataType) { return false; } + bool hasDivRemOp(Type *DataType, bool IsSigned) { return false; } bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) { return 
false; } Index: include/llvm/Bitcode/LLVMBitCodes.h =================================================================== --- include/llvm/Bitcode/LLVMBitCodes.h +++ include/llvm/Bitcode/LLVMBitCodes.h @@ -399,6 +399,10 @@ /// PossiblyExactOperator's SubclassOptionalData contents. enum PossiblyExactOperatorOptionalFlags { PEO_EXACT = 0 }; +/// PossiblyOverflowOperatorOptionalFlags - Flags for serializing +/// PossiblyOverflowOperator's SubclassOptionalData contents +enum PossiblyOverflowOperatorOptionalFlags { POO_NO_OVERFLOW = 1 }; + /// Encoded AtomicOrdering values. enum AtomicOrderingCodes { ORDERING_NOTATOMIC = 0, Index: include/llvm/IR/ConstantFolder.h =================================================================== --- include/llvm/IR/ConstantFolder.h +++ include/llvm/IR/ConstantFolder.h @@ -60,14 +60,14 @@ return ConstantExpr::getFMul(LHS, RHS); } - Constant *CreateUDiv(Constant *LHS, Constant *RHS, - bool isExact = false) const { - return ConstantExpr::getUDiv(LHS, RHS, isExact); + Constant *CreateUDiv(Constant *LHS, Constant *RHS, bool isExact = false, + bool isNoOverflow = true) const { + return ConstantExpr::getUDiv(LHS, RHS, isExact, isNoOverflow); } - Constant *CreateSDiv(Constant *LHS, Constant *RHS, - bool isExact = false) const { - return ConstantExpr::getSDiv(LHS, RHS, isExact); + Constant *CreateSDiv(Constant *LHS, Constant *RHS, bool isExact = false, + bool isNoOverflow = true) const { + return ConstantExpr::getSDiv(LHS, RHS, isExact, isNoOverflow); } Constant *CreateFDiv(Constant *LHS, Constant *RHS) const { Index: include/llvm/IR/Constants.h =================================================================== --- include/llvm/IR/Constants.h +++ include/llvm/IR/Constants.h @@ -911,8 +911,10 @@ static Constant *getMul(Constant *C1, Constant *C2, bool HasNUW = false, bool HasNSW = false); static Constant *getFMul(Constant *C1, Constant *C2); - static Constant *getUDiv(Constant *C1, Constant *C2, bool isExact = false); - static Constant 
*getSDiv(Constant *C1, Constant *C2, bool isExact = false); + static Constant *getUDiv(Constant *C1, Constant *C2, bool isExact = false, + bool isNoOverflow = true); + static Constant *getSDiv(Constant *C1, Constant *C2, bool isExact = false, + bool isNoOverflow = true); static Constant *getFDiv(Constant *C1, Constant *C2); static Constant *getURem(Constant *C1, Constant *C2); static Constant *getSRem(Constant *C1, Constant *C2); Index: include/llvm/IR/IRBuilder.h =================================================================== --- include/llvm/IR/IRBuilder.h +++ include/llvm/IR/IRBuilder.h @@ -957,29 +957,51 @@ FPMathTag, FMF), Name); } Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "", - bool isExact = false) { + bool isExact = false, bool isNoOverflow = true) { if (Constant *LC = dyn_cast<Constant>(LHS)) if (Constant *RC = dyn_cast<Constant>(RHS)) - return Insert(Folder.CreateUDiv(LC, RC, isExact), Name); - if (!isExact) + return Insert(Folder.CreateUDiv(LC, RC, isExact, isNoOverflow), Name); + if (!isExact && !isNoOverflow) return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name); - return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name); + if (!isExact) + return Insert(BinaryOperator::CreateNoOverflowUDiv(LHS, RHS), Name); + if (!isNoOverflow) + return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name); + return Insert(BinaryOperator::CreateExactNoOverflowUDiv(LHS, RHS), Name); } Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateUDiv(LHS, RHS, Name, true); } + Value *CreateMayOverflowUDiv(Value *LHS, Value *RHS, const Twine &Name = "") { + return CreateUDiv(LHS, RHS, Name, false, false); + } + Value *CreateExactMayOverflowUDiv(Value *LHS, Value *RHS, + const Twine &Name = "") { + return CreateUDiv(LHS, RHS, Name, true, false); + } Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "", - bool isExact = false) { + bool isExact = false, bool isNoOverflow = true) { if (Constant *LC = 
dyn_cast<Constant>(LHS)) if (Constant *RC = dyn_cast<Constant>(RHS)) - return Insert(Folder.CreateSDiv(LC, RC, isExact), Name); - if (!isExact) + return Insert(Folder.CreateSDiv(LC, RC, isExact, isNoOverflow), Name); + if (!isExact && !isNoOverflow) return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name); - return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name); + if (!isExact) + return Insert(BinaryOperator::CreateNoOverflowSDiv(LHS, RHS), Name); + if (!isNoOverflow) + return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name); + return Insert(BinaryOperator::CreateExactNoOverflowSDiv(LHS, RHS), Name); } Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") { return CreateSDiv(LHS, RHS, Name, true); } + Value *CreateMayOverflowSDiv(Value *LHS, Value *RHS, const Twine &Name = "") { + return CreateSDiv(LHS, RHS, Name, false, false); + } + Value *CreateExactMayOverflowSDiv(Value *LHS, Value *RHS, + const Twine &Name = "") { + return CreateSDiv(LHS, RHS, Name, true, false); + } Value *CreateFDiv(Value *LHS, Value *RHS, const Twine &Name = "", MDNode *FPMathTag = nullptr) { if (Constant *LC = dyn_cast<Constant>(LHS)) Index: include/llvm/IR/InstrTypes.h =================================================================== --- include/llvm/IR/InstrTypes.h +++ include/llvm/IR/InstrTypes.h @@ -447,6 +447,48 @@ BO->setIsExact(true); return BO; } + static BinaryOperator *CreateNoOverflow(BinaryOps Opc, Value *V1, Value *V2, + const Twine &Name = "") { + BinaryOperator *BO = Create(Opc, V1, V2, Name); + BO->setIsNoOverflow(true); + return BO; + } + static BinaryOperator *CreateNoOverflow(BinaryOps Opc, Value *V1, Value *V2, + const Twine &Name, BasicBlock *BB) { + BinaryOperator *BO = Create(Opc, V1, V2, Name, BB); + BO->setIsNoOverflow(true); + return BO; + } + static BinaryOperator *CreateNoOverflow(BinaryOps Opc, Value *V1, Value *V2, + const Twine &Name, Instruction *I) { + BinaryOperator *BO = Create(Opc, V1, V2, Name, I); + 
BO->setIsNoOverflow(true); + return BO; + } + static BinaryOperator *CreateExactNoOverflow(BinaryOps Opc, Value *V1, + Value *V2, + const Twine &Name = "") { + BinaryOperator *BO = Create(Opc, V1, V2, Name); + BO->setIsExact(true); + BO->setIsNoOverflow(true); + return BO; + } + static BinaryOperator *CreateExactNoOverflow(BinaryOps Opc, Value *V1, + Value *V2, const Twine &Name, + BasicBlock *BB) { + BinaryOperator *BO = Create(Opc, V1, V2, Name, BB); + BO->setIsExact(true); + BO->setIsNoOverflow(true); + return BO; + } + static BinaryOperator *CreateExactNoOverflow(BinaryOps Opc, Value *V1, + Value *V2, const Twine &Name, + Instruction *I) { + BinaryOperator *BO = Create(Opc, V1, V2, Name, I); + BO->setIsExact(true); + BO->setIsNoOverflow(true); + return BO; + } #define DEFINE_HELPERS(OPC, NUWNSWEXACT) \ static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \ @@ -476,6 +518,12 @@ DEFINE_HELPERS(AShr, Exact) // CreateExactAShr DEFINE_HELPERS(LShr, Exact) // CreateExactLShr + DEFINE_HELPERS(SDiv, NoOverflow) // CreateNoOverflowSDiv + DEFINE_HELPERS(UDiv, NoOverflow) // CreateNoOverflowUDiv + + DEFINE_HELPERS(SDiv, ExactNoOverflow) // CreateExactNoOverflowSDiv + DEFINE_HELPERS(UDiv, ExactNoOverflow) // CreateExactNoOverflowUDiv + #undef DEFINE_HELPERS /// Helper functions to construct and inspect unary operations (NEG and NOT) Index: include/llvm/IR/Instruction.h =================================================================== --- include/llvm/IR/Instruction.h +++ include/llvm/IR/Instruction.h @@ -296,6 +296,11 @@ /// which supports this flag. See LangRef.html for the meaning of this flag. void setIsExact(bool b = true); + /// Set or clear the divide-by-zero\overflow flag on this instruction, which + /// must be an operator which supports this flag. See LangRef.html for the + /// meaning of this flag. + void setIsNoOverflow(bool b = true); + /// Determine whether the no unsigned wrap flag is set. 
bool hasNoUnsignedWrap() const; @@ -309,6 +314,9 @@ /// Determine whether the exact flag is set. bool isExact() const; + /// Determine whether the no-overflow flag is set. + bool isNoOverflow() const; + /// Set or clear all fast-math-flags on this instruction, which must be an /// operator which supports this flag. See LangRef.html for the meaning of /// this flag. Index: include/llvm/IR/NoFolder.h =================================================================== --- include/llvm/IR/NoFolder.h +++ include/llvm/IR/NoFolder.h @@ -99,28 +99,52 @@ return BinaryOperator::CreateFMul(LHS, RHS); } - Instruction *CreateUDiv(Constant *LHS, Constant *RHS, - bool isExact = false) const { - if (!isExact) + Instruction *CreateUDiv(Constant *LHS, Constant *RHS, bool isExact = false, + bool isNoOverflow = true) const { + if (!isExact && !isNoOverflow) return BinaryOperator::CreateUDiv(LHS, RHS); - return BinaryOperator::CreateExactUDiv(LHS, RHS); + if (!isExact) + return BinaryOperator::CreateNoOverflowUDiv(LHS, RHS); + if (!isNoOverflow) + return BinaryOperator::CreateExactUDiv(LHS, RHS); + return BinaryOperator::CreateExactNoOverflowUDiv(LHS, RHS); + } + + Instruction *CreateNoOverflowUDiv(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateNoOverflowUDiv(LHS, RHS); } Instruction *CreateExactUDiv(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateExactUDiv(LHS, RHS); } - Instruction *CreateSDiv(Constant *LHS, Constant *RHS, - bool isExact = false) const { - if (!isExact) + Instruction *CreateExactNoOverflowUDiv(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateExactNoOverflowUDiv(LHS, RHS); + } + + Instruction *CreateSDiv(Constant *LHS, Constant *RHS, bool isExact = false, + bool isNoOverflow = true) const { + if (!isExact && !isNoOverflow) return BinaryOperator::CreateSDiv(LHS, RHS); - return BinaryOperator::CreateExactSDiv(LHS, RHS); + if (!isExact) + return BinaryOperator::CreateNoOverflowSDiv(LHS, RHS); + if 
(!isNoOverflow) + return BinaryOperator::CreateExactSDiv(LHS, RHS); + return BinaryOperator::CreateExactNoOverflowSDiv(LHS, RHS); + } + + Instruction *CreateNoOverflowSDiv(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateNoOverflowSDiv(LHS, RHS); } Instruction *CreateExactSDiv(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateExactSDiv(LHS, RHS); } + Instruction *CreateExactNoOverflowSDiv(Constant *LHS, Constant *RHS) const { + return BinaryOperator::CreateExactNoOverflowSDiv(LHS, RHS); + } + Instruction *CreateFDiv(Constant *LHS, Constant *RHS) const { return BinaryOperator::CreateFDiv(LHS, RHS); } Index: include/llvm/IR/Operator.h =================================================================== --- include/llvm/IR/Operator.h +++ include/llvm/IR/Operator.h @@ -156,6 +156,43 @@ } }; +/// A udiv or sdiv instruction, which can be marked as "nof", +/// indicating that the operand values are safe and overflow +/// or div by zero not expected to occur +class PossiblyOverflowOperator : public PossiblyExactOperator { +public: + enum { IsNoOverflow = (1 << 1) }; + +private: + friend class Instruction; + friend class ConstantExpr; + + void setIsNoOverflow(bool B) { + SubclassOptionalData = + (SubclassOptionalData & ~IsNoOverflow) | (B * IsNoOverflow); + } + +public: + /// Test whether this division is known to be with no-overflow or + /// div by zero or not + bool isNoOverflow() const { return SubclassOptionalData & IsNoOverflow; } + + static bool isPossiblyOverflowOpcode(unsigned OpC) { + return OpC == Instruction::SDiv || OpC == Instruction::UDiv; + } + + static bool classof(const ConstantExpr *CE) { + return isPossiblyOverflowOpcode(CE->getOpcode()); + } + static bool classof(const Instruction *I) { + return isPossiblyOverflowOpcode(I->getOpcode()); + } + static bool classof(const Value *V) { + return (isa<Instruction>(V) && classof(cast<Instruction>(V))) || + (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V))); + } +}; + /// 
Convenience struct for specifying and reasoning about fast-math flags. class FastMathFlags { private: @@ -399,10 +436,10 @@ }; class SDivOperator - : public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> { + : public ConcreteOperator<PossiblyOverflowOperator, Instruction::SDiv> { }; class UDivOperator - : public ConcreteOperator<PossiblyExactOperator, Instruction::UDiv> { + : public ConcreteOperator<PossiblyOverflowOperator, Instruction::UDiv> { }; class AShrOperator : public ConcreteOperator<PossiblyExactOperator, Instruction::AShr> { Index: lib/Analysis/TargetTransformInfo.cpp =================================================================== --- lib/Analysis/TargetTransformInfo.cpp +++ lib/Analysis/TargetTransformInfo.cpp @@ -171,6 +171,14 @@ return TTIImpl->isLegalMaskedScatter(DataType); } +bool TargetTransformInfo::isLegalMayOverflowUDiv(Type *DataType) const { + return TTIImpl->isLegalMayOverflowUDiv(DataType); +} + +bool TargetTransformInfo::isLegalMayOverflowSDiv(Type *DataType) const { + return TTIImpl->isLegalMayOverflowSDiv(DataType); +} + bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const { return TTIImpl->hasDivRemOp(DataType, IsSigned); } Index: lib/AsmParser/LLLexer.cpp =================================================================== --- lib/AsmParser/LLLexer.cpp +++ lib/AsmParser/LLLexer.cpp @@ -558,6 +558,7 @@ KEYWORD(nuw); KEYWORD(nsw); KEYWORD(exact); + KEYWORD(nof); KEYWORD(inbounds); KEYWORD(inrange); KEYWORD(align); Index: lib/AsmParser/LLParser.cpp =================================================================== --- lib/AsmParser/LLParser.cpp +++ lib/AsmParser/LLParser.cpp @@ -3171,6 +3171,7 @@ bool NUW = false; bool NSW = false; bool Exact = false; + bool NOF = false; unsigned Opc = Lex.getUIntVal(); Constant *Val0, *Val1; Lex.Lex(); @@ -3188,6 +3189,9 @@ Opc == Instruction::LShr || Opc == Instruction::AShr) { if (EatIfPresent(lltok::kw_exact)) Exact = true; + if (Opc == Instruction::SDiv || Opc == 
Instruction::UDiv) + if (EatIfPresent(lltok::kw_nof)) + NOF = true; } if (ParseToken(lltok::lparen, "expected '(' in binary constantexpr") || ParseGlobalTypeAndValue(Val0) || @@ -3232,6 +3236,7 @@ if (NUW) Flags |= OverflowingBinaryOperator::NoUnsignedWrap; if (NSW) Flags |= OverflowingBinaryOperator::NoSignedWrap; if (Exact) Flags |= PossiblyExactOperator::IsExact; + if (NOF) Flags |= PossiblyOverflowOperator::IsNoOverflow; Constant *C = ConstantExpr::get(Opc, Val0, Val1, Flags); ID.ConstantVal = C; ID.Kind = ValID::t_Constant; @@ -5176,9 +5181,14 @@ case lltok::kw_lshr: case lltok::kw_ashr: { bool Exact = EatIfPresent(lltok::kw_exact); + bool NOF = false; + + if (Token == lltok::kw_sdiv || Token == lltok::kw_udiv) + NOF = EatIfPresent(lltok::kw_nof); if (ParseArithmetic(Inst, PFS, KeywordVal, 1)) return true; if (Exact) cast<BinaryOperator>(Inst)->setIsExact(true); + if (NOF) cast<BinaryOperator>(Inst)->setIsNoOverflow(true); return false; } Index: lib/AsmParser/LLToken.h =================================================================== --- lib/AsmParser/LLToken.h +++ lib/AsmParser/LLToken.h @@ -108,6 +108,7 @@ kw_nuw, kw_nsw, kw_exact, + kw_nof, kw_inbounds, kw_inrange, kw_align, Index: lib/Bitcode/Reader/BitcodeReader.cpp =================================================================== --- lib/Bitcode/Reader/BitcodeReader.cpp +++ lib/Bitcode/Reader/BitcodeReader.cpp @@ -2323,6 +2323,11 @@ Opc == Instruction::AShr) { if (Record[3] & (1 << bitc::PEO_EXACT)) Flags |= SDivOperator::IsExact; + if (Opc == Instruction::SDiv || + Opc == Instruction::UDiv) { + if (Record[3] & (1 << bitc::POO_NO_OVERFLOW)) + Flags |= SDivOperator::IsNoOverflow; + } } } V = ConstantExpr::get(Opc, LHS, RHS, Flags); @@ -3524,6 +3529,10 @@ Opc == Instruction::AShr) { if (Record[OpNum] & (1 << bitc::PEO_EXACT)) cast<BinaryOperator>(I)->setIsExact(true); + if (Opc == Instruction::SDiv || + Opc == Instruction::UDiv) + if (Record[OpNum] & (1 << bitc::POO_NO_OVERFLOW)) + 
cast<BinaryOperator>(I)->setIsNoOverflow(true); } else if (isa<FPMathOperator>(I)) { FastMathFlags FMF = getDecodedFastMathFlags(Record[OpNum]); if (FMF.any()) Index: lib/Bitcode/Writer/BitcodeWriter.cpp =================================================================== --- lib/Bitcode/Writer/BitcodeWriter.cpp +++ lib/Bitcode/Writer/BitcodeWriter.cpp @@ -1328,6 +1328,10 @@ } else if (const auto *PEO = dyn_cast<PossiblyExactOperator>(V)) { if (PEO->isExact()) Flags |= 1 << bitc::PEO_EXACT; + if (const auto *POO = dyn_cast<PossiblyOverflowOperator>(V)) { + if (POO->isNoOverflow()) + Flags |= 1 << bitc::POO_NO_OVERFLOW; + } } else if (const auto *FPMO = dyn_cast<FPMathOperator>(V)) { if (FPMO->hasAllowReassoc()) Flags |= FastMathFlags::AllowReassoc; Index: lib/IR/AsmWriter.cpp =================================================================== --- lib/IR/AsmWriter.cpp +++ lib/IR/AsmWriter.cpp @@ -1139,9 +1139,18 @@ dyn_cast<PossiblyExactOperator>(U)) { if (Div->isExact()) Out << " exact"; + if (const PossiblyOverflowOperator *PO = + dyn_cast<PossiblyOverflowOperator>(U)) { + if (PO->isNoOverflow()) + Out << " nof"; + } } else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { if (GEP->isInBounds()) Out << " inbounds"; + } else if (const PossiblyOverflowOperator *PO = + dyn_cast<PossiblyOverflowOperator>(U)) { + if (PO->isNoOverflow()) + Out << " nof"; } } Index: lib/IR/Constants.cpp =================================================================== --- lib/IR/Constants.cpp +++ lib/IR/Constants.cpp @@ -2144,14 +2144,18 @@ return get(Instruction::FMul, C1, C2); } -Constant *ConstantExpr::getUDiv(Constant *C1, Constant *C2, bool isExact) { - return get(Instruction::UDiv, C1, C2, - isExact ? PossiblyExactOperator::IsExact : 0); +Constant *ConstantExpr::getUDiv(Constant *C1, Constant *C2, bool isExact, + bool isNoOverflow) { + unsigned Flags = (isExact ? PossiblyExactOperator::IsExact : 0) | + (isNoOverflow ? 
PossiblyOverflowOperator::IsNoOverflow : 0); + return get(Instruction::UDiv, C1, C2, Flags); } -Constant *ConstantExpr::getSDiv(Constant *C1, Constant *C2, bool isExact) { - return get(Instruction::SDiv, C1, C2, - isExact ? PossiblyExactOperator::IsExact : 0); +Constant *ConstantExpr::getSDiv(Constant *C1, Constant *C2, bool isExact, + bool isNoOverflow) { + unsigned Flags = (isExact ? PossiblyExactOperator::IsExact : 0) | + (isNoOverflow ? PossiblyOverflowOperator::IsNoOverflow : 0); + return get(Instruction::SDiv, C1, C2, Flags); } Constant *ConstantExpr::getFDiv(Constant *C1, Constant *C2) { @@ -2912,6 +2916,9 @@ } if (isa<PossiblyExactOperator>(BO)) BO->setIsExact(SubclassOptionalData & PossiblyExactOperator::IsExact); + if (isa<PossiblyOverflowOperator>(BO)) + BO->setIsNoOverflow(SubclassOptionalData & + PossiblyOverflowOperator::IsNoOverflow); return BO; } } Index: lib/IR/Instruction.cpp =================================================================== --- lib/IR/Instruction.cpp +++ lib/IR/Instruction.cpp @@ -109,6 +109,10 @@ cast<PossiblyExactOperator>(this)->setIsExact(b); } +void Instruction::setIsNoOverflow(bool b) { + cast<PossiblyOverflowOperator>(this)->setIsNoOverflow(b); +} + bool Instruction::hasNoUnsignedWrap() const { return cast<OverflowingBinaryOperator>(this)->hasNoUnsignedWrap(); } @@ -129,6 +133,7 @@ case Instruction::UDiv: case Instruction::SDiv: + cast<PossiblyOverflowOperator>(this)->setIsNoOverflow(true); case Instruction::AShr: case Instruction::LShr: cast<PossiblyExactOperator>(this)->setIsExact(false); @@ -144,6 +149,10 @@ return cast<PossiblyExactOperator>(this)->isExact(); } +bool Instruction::isNoOverflow() const { + return cast<PossiblyOverflowOperator>(this)->isNoOverflow(); +} + void Instruction::setFast(bool B) { assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op"); cast<FPMathOperator>(this)->setFast(B); @@ -252,6 +261,11 @@ if (isa<PossiblyExactOperator>(this)) setIsExact(PE->isExact()); + // Copy the 
no-overflow flag + if (auto *PO = dyn_cast<PossiblyOverflowOperator>(V)) + if (isa<PossiblyOverflowOperator>(this)) + setIsNoOverflow(PO->isNoOverflow()); + // Copy the fast-math flags. if (auto *FP = dyn_cast<FPMathOperator>(V)) if (isa<FPMathOperator>(this)) @@ -274,6 +288,10 @@ if (isa<PossiblyExactOperator>(this)) setIsExact(isExact() & PE->isExact()); + if (auto *PO = dyn_cast<PossiblyOverflowOperator>(V)) + if (isa<PossiblyOverflowOperator>(this)) + setIsNoOverflow(isNoOverflow() & PO->isNoOverflow()); + if (auto *FP = dyn_cast<FPMathOperator>(V)) { if (isa<FPMathOperator>(this)) { FastMathFlags FM = getFastMathFlags(); Index: lib/Target/X86/X86TargetTransformInfo.h =================================================================== --- lib/Target/X86/X86TargetTransformInfo.h +++ lib/Target/X86/X86TargetTransformInfo.h @@ -124,6 +124,8 @@ bool isLegalMaskedStore(Type *DataType); bool isLegalMaskedGather(Type *DataType); bool isLegalMaskedScatter(Type *DataType); + bool isLegalMayOverflowUDiv(Type *DataType); + bool isLegalMayOverflowSDiv(Type *DataType); bool hasDivRemOp(Type *DataType, bool IsSigned); bool isFCmpOrdCheaperThanFCmpZero(Type *Ty); bool areInlineCompatible(const Function *Caller, Index: lib/Target/X86/X86TargetTransformInfo.cpp =================================================================== --- lib/Target/X86/X86TargetTransformInfo.cpp +++ lib/Target/X86/X86TargetTransformInfo.cpp @@ -2533,6 +2533,14 @@ return isLegalMaskedGather(DataType); } +bool X86TTIImpl::isLegalMayOverflowUDiv(Type *DataType) { + return false; +} + +bool X86TTIImpl::isLegalMayOverflowSDiv(Type *DataType) { + return isLegalMayOverflowUDiv(DataType); +} + bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) { EVT VT = TLI->getValueType(DL, DataType); return TLI->isOperationLegal(IsSigned ? 
ISD::SDIVREM : ISD::UDIVREM, VT); Index: lib/Transforms/InstCombine/InstCombineAddSub.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -1629,8 +1629,11 @@ // 0 - (X sdiv C) -> (X sdiv -C) provided the negation doesn't overflow. if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) && - C->isNotMinSignedValue() && !C->isOneValue()) - return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C)); + C->isNotMinSignedValue() && !C->isOneValue()) { + Instruction *BinOp = BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C)); + BinOp->setIsNoOverflow(cast<BinaryOperator>(Op1)->isNoOverflow()); + return BinOp; + } // 0 - (X << Y) -> (-X << Y) when X is freely negatable. if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero())) Index: lib/Transforms/InstCombine/InstCombineMulDivRem.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -913,9 +913,12 @@ if ((IsSigned && match(LHS, m_SDiv(m_Value(X), m_APInt(C1)))) || (!IsSigned && match(LHS, m_UDiv(m_Value(X), m_APInt(C1))))) { APInt Product(C1->getBitWidth(), /*Val=*/0ULL, IsSigned); - if (!MultiplyOverflows(*C1, *C2, Product, IsSigned)) - return BinaryOperator::Create(I.getOpcode(), X, - ConstantInt::get(I.getType(), Product)); + if (!MultiplyOverflows(*C1, *C2, Product, IsSigned)) { + Instruction *BinOp = BinaryOperator::Create( + I.getOpcode(), X, ConstantInt::get(I.getType(), Product)); + BinOp->setIsNoOverflow(I.isNoOverflow() && LHS->isNoOverflow()); + return BinOp; + } } if ((IsSigned && match(LHS, m_NSWMul(m_Value(X), m_APInt(C1)))) || @@ -927,6 +930,7 @@ BinaryOperator *BO = BinaryOperator::Create( I.getOpcode(), X, ConstantInt::get(X->getType(), Quotient)); BO->setIsExact(I.isExact()); + 
BO->setIsNoOverflow(I.isNoOverflow()); return BO; } @@ -955,6 +959,7 @@ BinaryOperator *BO = BinaryOperator::Create( I.getOpcode(), X, ConstantInt::get(X->getType(), Quotient)); BO->setIsExact(I.isExact()); + BO->setIsNoOverflow(I.isNoOverflow()); return BO; } @@ -1002,8 +1007,11 @@ if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) { // (X - Z) / Y; Y = Op1 bool isSigned = I.getOpcode() == Instruction::SDiv; if ((isSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) || - (!isSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1))))) - return BinaryOperator::Create(I.getOpcode(), X, Op1); + (!isSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1))))) { + Instruction *BinOp = BinaryOperator::Create(I.getOpcode(), X, Op1); + BinOp->setIsNoOverflow(I.isNoOverflow()); + return BinOp; + } } return nullptr; @@ -1141,6 +1149,9 @@ // udiv (zext X), (zext Y) --> zext (udiv X, Y) // urem (zext X), (zext Y) --> zext (urem X, Y) Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y); + Instruction *BinOp = dyn_cast<Instruction>(NarrowOp); + if (BinOp && isa<PossiblyOverflowOperator>(NarrowOp)) + BinOp->setIsNoOverflow(I.isNoOverflow()); return new ZExtInst(NarrowOp, Ty); } @@ -1158,6 +1169,9 @@ // urem C, (zext X) --> zext (urem C', X) Value *NarrowOp = isa<Constant>(D) ? 
Builder.CreateBinOp(Opcode, X, TruncC) : Builder.CreateBinOp(Opcode, TruncC, X); + Instruction *BinOp = dyn_cast<Instruction>(NarrowOp); + if (BinOp && isa<PossiblyOverflowOperator>(NarrowOp)) + BinOp->setIsNoOverflow(I.isNoOverflow()); return new ZExtInst(NarrowOp, Ty); } @@ -1191,6 +1205,7 @@ X, ConstantInt::get(X->getType(), C2ShlC1)); if (IsExact) BO->setIsExact(); + BO->setIsNoOverflow(I.isNoOverflow()); return BO; } } @@ -1286,6 +1301,7 @@ if (match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) { auto *BO = BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(RHS)); BO->setIsExact(I.isExact()); + BO->setIsNoOverflow(I.isNoOverflow()); return BO; } } @@ -1298,6 +1314,7 @@ // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); BO->setIsExact(I.isExact()); + BO->setIsNoOverflow(I.isNoOverflow()); return BO; } @@ -1308,6 +1325,7 @@ // the sign bit set. auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); BO->setIsExact(I.isExact()); + BO->setIsNoOverflow(I.isNoOverflow()); return BO; } } Index: lib/Transforms/InstCombine/InstCombineVectorOps.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineVectorOps.cpp +++ lib/Transforms/InstCombine/InstCombineVectorOps.cpp @@ -967,6 +967,9 @@ if (isa<PossiblyExactOperator>(BO)) { New->setIsExact(BO->isExact()); } + if (isa<PossiblyOverflowOperator>(BO)) { + New->setIsNoOverflow(BO->isNoOverflow()); + } if (isa<FPMathOperator>(BO)) New->copyFastMathFlags(I); return New; Index: lib/Transforms/InstCombine/InstructionCombining.cpp =================================================================== --- lib/Transforms/InstCombine/InstructionCombining.cpp +++ lib/Transforms/InstCombine/InstructionCombining.cpp @@ -931,6 +931,8 @@ Value *RI = Builder.CreateBinOp(I->getOpcode(), Op0, Op1, "phitmp"); auto *FPInst = dyn_cast<Instruction>(RI); + if (FPInst && isa<PossiblyOverflowOperator>(RI)) + 
FPInst->setIsNoOverflow(I->isNoOverflow()); if (FPInst && isa<FPMathOperator>(FPInst)) FPInst->copyFastMathFlags(I); return RI; Index: lib/Transforms/Scalar/CorrelatedValuePropagation.cpp =================================================================== --- lib/Transforms/Scalar/CorrelatedValuePropagation.cpp +++ lib/Transforms/Scalar/CorrelatedValuePropagation.cpp @@ -469,6 +469,7 @@ auto *BO = BinaryOperator::CreateUDiv(SDI->getOperand(0), SDI->getOperand(1), SDI->getName(), SDI); BO->setIsExact(SDI->isExact()); + BO->setIsNoOverflow(SDI->isNoOverflow()); SDI->replaceAllUsesWith(BO); SDI->eraseFromParent(); Index: lib/Transforms/Utils/SimplifyIndVar.cpp =================================================================== --- lib/Transforms/Utils/SimplifyIndVar.cpp +++ lib/Transforms/Utils/SimplifyIndVar.cpp @@ -292,6 +292,7 @@ BinaryOperator::UDiv, SDiv->getOperand(0), SDiv->getOperand(1), SDiv->getName() + ".udiv", SDiv); UDiv->setIsExact(SDiv->isExact()); + UDiv->setIsNoOverflow(SDiv->isNoOverflow()); SDiv->replaceAllUsesWith(UDiv); DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n'); ++NumSimplifiedSDiv; Index: test/Analysis/CostModel/AArch64/free-widening-casts.ll =================================================================== --- test/Analysis/CostModel/AArch64/free-widening-casts.ll +++ test/Analysis/CostModel/AArch64/free-widening-casts.ll @@ -565,7 +565,7 @@ ; COST-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %tmp0 = zext <8 x i8> %a to <8 x i16> define <8 x i16> @neg_non_widening_op(<8 x i8> %a, <8 x i16> %b) { %tmp0 = zext <8 x i8> %a to <8 x i16> - %tmp1 = udiv <8 x i16> %b, %tmp0 + %tmp1 = udiv nof <8 x i16> %b, %tmp0 ret <8 x i16> %tmp1 } Index: test/Analysis/CostModel/ARM/divrem.ll =================================================================== --- test/Analysis/CostModel/ARM/divrem.ll +++ test/Analysis/CostModel/ARM/divrem.ll @@ -4,224 +4,224 @@ ; CHECK: sdiv_v2_i8 ; CHECK: cost of 40 {{.*}} sdiv - %1 = sdiv 
<2 x i8> %a, %b + %1 = sdiv nof <2 x i8> %a, %b ret <2 x i8> %1 } define <2 x i16> @sdiv_v2_i16(<2 x i16> %a, <2 x i16> %b) { ; CHECK: sdiv_v2_i16 ; CHECK: cost of 40 {{.*}} sdiv - %1 = sdiv <2 x i16> %a, %b + %1 = sdiv nof <2 x i16> %a, %b ret <2 x i16> %1 } define <2 x i32> @sdiv_v2_i32(<2 x i32> %a, <2 x i32> %b) { ; CHECK: sdiv_v2_i32 ; CHECK: cost of 40 {{.*}} sdiv - %1 = sdiv <2 x i32> %a, %b + %1 = sdiv nof <2 x i32> %a, %b ret <2 x i32> %1 } define <2 x i64> @sdiv_v2_i64(<2 x i64> %a, <2 x i64> %b) { ; CHECK: sdiv_v2_i64 ; CHECK: cost of 40 {{.*}} sdiv - %1 = sdiv <2 x i64> %a, %b + %1 = sdiv nof <2 x i64> %a, %b ret <2 x i64> %1 } define <4 x i8> @sdiv_v4_i8(<4 x i8> %a, <4 x i8> %b) { ; CHECK: sdiv_v4_i8 ; CHECK: cost of 10 {{.*}} sdiv - %1 = sdiv <4 x i8> %a, %b + %1 = sdiv nof <4 x i8> %a, %b ret <4 x i8> %1 } define <4 x i16> @sdiv_v4_i16(<4 x i16> %a, <4 x i16> %b) { ; CHECK: sdiv_v4_i16 ; CHECK: cost of 10 {{.*}} sdiv - %1 = sdiv <4 x i16> %a, %b + %1 = sdiv nof <4 x i16> %a, %b ret <4 x i16> %1 } define <4 x i32> @sdiv_v4_i32(<4 x i32> %a, <4 x i32> %b) { ; CHECK: sdiv_v4_i32 ; CHECK: cost of 80 {{.*}} sdiv - %1 = sdiv <4 x i32> %a, %b + %1 = sdiv nof <4 x i32> %a, %b ret <4 x i32> %1 } define <4 x i64> @sdiv_v4_i64(<4 x i64> %a, <4 x i64> %b) { ; CHECK: sdiv_v4_i64 ; CHECK: cost of 80 {{.*}} sdiv - %1 = sdiv <4 x i64> %a, %b + %1 = sdiv nof <4 x i64> %a, %b ret <4 x i64> %1 } define <8 x i8> @sdiv_v8_i8(<8 x i8> %a, <8 x i8> %b) { ; CHECK: sdiv_v8_i8 ; CHECK: cost of 10 {{.*}} sdiv - %1 = sdiv <8 x i8> %a, %b + %1 = sdiv nof <8 x i8> %a, %b ret <8 x i8> %1 } define <8 x i16> @sdiv_v8_i16(<8 x i16> %a, <8 x i16> %b) { ; CHECK: sdiv_v8_i16 ; CHECK: cost of 160 {{.*}} sdiv - %1 = sdiv <8 x i16> %a, %b + %1 = sdiv nof <8 x i16> %a, %b ret <8 x i16> %1 } define <8 x i32> @sdiv_v8_i32(<8 x i32> %a, <8 x i32> %b) { ; CHECK: sdiv_v8_i32 ; CHECK: cost of 160 {{.*}} sdiv - %1 = sdiv <8 x i32> %a, %b + %1 = sdiv nof <8 x i32> %a, %b ret <8 x i32> %1 } define 
<8 x i64> @sdiv_v8_i64(<8 x i64> %a, <8 x i64> %b) { ; CHECK: sdiv_v8_i64 ; CHECK: cost of 160 {{.*}} sdiv - %1 = sdiv <8 x i64> %a, %b + %1 = sdiv nof <8 x i64> %a, %b ret <8 x i64> %1 } define <16 x i8> @sdiv_v16_i8(<16 x i8> %a, <16 x i8> %b) { ; CHECK: sdiv_v16_i8 ; CHECK: cost of 320 {{.*}} sdiv - %1 = sdiv <16 x i8> %a, %b + %1 = sdiv nof <16 x i8> %a, %b ret <16 x i8> %1 } define <16 x i16> @sdiv_v16_i16(<16 x i16> %a, <16 x i16> %b) { ; CHECK: sdiv_v16_i16 ; CHECK: cost of 320 {{.*}} sdiv - %1 = sdiv <16 x i16> %a, %b + %1 = sdiv nof <16 x i16> %a, %b ret <16 x i16> %1 } define <16 x i32> @sdiv_v16_i32(<16 x i32> %a, <16 x i32> %b) { ; CHECK: sdiv_v16_i32 ; CHECK: cost of 320 {{.*}} sdiv - %1 = sdiv <16 x i32> %a, %b + %1 = sdiv nof <16 x i32> %a, %b ret <16 x i32> %1 } define <16 x i64> @sdiv_v16_i64(<16 x i64> %a, <16 x i64> %b) { ; CHECK: sdiv_v16_i64 ; CHECK: cost of 320 {{.*}} sdiv - %1 = sdiv <16 x i64> %a, %b + %1 = sdiv nof <16 x i64> %a, %b ret <16 x i64> %1 } define <2 x i8> @udiv_v2_i8(<2 x i8> %a, <2 x i8> %b) { ; CHECK: udiv_v2_i8 ; CHECK: cost of 40 {{.*}} udiv - %1 = udiv <2 x i8> %a, %b + %1 = udiv nof <2 x i8> %a, %b ret <2 x i8> %1 } define <2 x i16> @udiv_v2_i16(<2 x i16> %a, <2 x i16> %b) { ; CHECK: udiv_v2_i16 ; CHECK: cost of 40 {{.*}} udiv - %1 = udiv <2 x i16> %a, %b + %1 = udiv nof <2 x i16> %a, %b ret <2 x i16> %1 } define <2 x i32> @udiv_v2_i32(<2 x i32> %a, <2 x i32> %b) { ; CHECK: udiv_v2_i32 ; CHECK: cost of 40 {{.*}} udiv - %1 = udiv <2 x i32> %a, %b + %1 = udiv nof <2 x i32> %a, %b ret <2 x i32> %1 } define <2 x i64> @udiv_v2_i64(<2 x i64> %a, <2 x i64> %b) { ; CHECK: udiv_v2_i64 ; CHECK: cost of 40 {{.*}} udiv - %1 = udiv <2 x i64> %a, %b + %1 = udiv nof <2 x i64> %a, %b ret <2 x i64> %1 } define <4 x i8> @udiv_v4_i8(<4 x i8> %a, <4 x i8> %b) { ; CHECK: udiv_v4_i8 ; CHECK: cost of 10 {{.*}} udiv - %1 = udiv <4 x i8> %a, %b + %1 = udiv nof <4 x i8> %a, %b ret <4 x i8> %1 } define <4 x i16> @udiv_v4_i16(<4 x i16> %a, <4 x i16> 
%b) { ; CHECK: udiv_v4_i16 ; CHECK: cost of 10 {{.*}} udiv - %1 = udiv <4 x i16> %a, %b + %1 = udiv nof <4 x i16> %a, %b ret <4 x i16> %1 } define <4 x i32> @udiv_v4_i32(<4 x i32> %a, <4 x i32> %b) { ; CHECK: udiv_v4_i32 ; CHECK: cost of 80 {{.*}} udiv - %1 = udiv <4 x i32> %a, %b + %1 = udiv nof <4 x i32> %a, %b ret <4 x i32> %1 } define <4 x i64> @udiv_v4_i64(<4 x i64> %a, <4 x i64> %b) { ; CHECK: udiv_v4_i64 ; CHECK: cost of 80 {{.*}} udiv - %1 = udiv <4 x i64> %a, %b + %1 = udiv nof <4 x i64> %a, %b ret <4 x i64> %1 } define <8 x i8> @udiv_v8_i8(<8 x i8> %a, <8 x i8> %b) { ; CHECK: udiv_v8_i8 ; CHECK: cost of 10 {{.*}} udiv - %1 = udiv <8 x i8> %a, %b + %1 = udiv nof <8 x i8> %a, %b ret <8 x i8> %1 } define <8 x i16> @udiv_v8_i16(<8 x i16> %a, <8 x i16> %b) { ; CHECK: udiv_v8_i16 ; CHECK: cost of 160 {{.*}} udiv - %1 = udiv <8 x i16> %a, %b + %1 = udiv nof <8 x i16> %a, %b ret <8 x i16> %1 } define <8 x i32> @udiv_v8_i32(<8 x i32> %a, <8 x i32> %b) { ; CHECK: udiv_v8_i32 ; CHECK: cost of 160 {{.*}} udiv - %1 = udiv <8 x i32> %a, %b + %1 = udiv nof <8 x i32> %a, %b ret <8 x i32> %1 } define <8 x i64> @udiv_v8_i64(<8 x i64> %a, <8 x i64> %b) { ; CHECK: udiv_v8_i64 ; CHECK: cost of 160 {{.*}} udiv - %1 = udiv <8 x i64> %a, %b + %1 = udiv nof <8 x i64> %a, %b ret <8 x i64> %1 } define <16 x i8> @udiv_v16_i8(<16 x i8> %a, <16 x i8> %b) { ; CHECK: udiv_v16_i8 ; CHECK: cost of 320 {{.*}} udiv - %1 = udiv <16 x i8> %a, %b + %1 = udiv nof <16 x i8> %a, %b ret <16 x i8> %1 } define <16 x i16> @udiv_v16_i16(<16 x i16> %a, <16 x i16> %b) { ; CHECK: udiv_v16_i16 ; CHECK: cost of 320 {{.*}} udiv - %1 = udiv <16 x i16> %a, %b + %1 = udiv nof <16 x i16> %a, %b ret <16 x i16> %1 } define <16 x i32> @udiv_v16_i32(<16 x i32> %a, <16 x i32> %b) { ; CHECK: udiv_v16_i32 ; CHECK: cost of 320 {{.*}} udiv - %1 = udiv <16 x i32> %a, %b + %1 = udiv nof <16 x i32> %a, %b ret <16 x i32> %1 } define <16 x i64> @udiv_v16_i64(<16 x i64> %a, <16 x i64> %b) { ; CHECK: udiv_v16_i64 ; CHECK: cost 
of 320 {{.*}} udiv - %1 = udiv <16 x i64> %a, %b + %1 = udiv nof <16 x i64> %a, %b ret <16 x i64> %1 } define <2 x i8> @srem_v2_i8(<2 x i8> %a, <2 x i8> %b) { Index: test/Analysis/CostModel/SystemZ/div-pow2.ll =================================================================== --- test/Analysis/CostModel/SystemZ/div-pow2.ll +++ test/Analysis/CostModel/SystemZ/div-pow2.ll @@ -3,152 +3,152 @@ ; Scalar sdiv define i64 @fun0(i64 %a) { - %r = sdiv i64 %a, 2 + %r = sdiv nof i64 %a, 2 ret i64 %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv i64 %a, 2 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof i64 %a, 2 } define i64 @fun1(i64 %a) { - %r = sdiv i64 %a, -4 + %r = sdiv nof i64 %a, -4 ret i64 %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv i64 %a, -4 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof i64 %a, -4 } define i32 @fun2(i32 %a) { - %r = sdiv i32 %a, 8 + %r = sdiv nof i32 %a, 8 ret i32 %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv i32 %a, 8 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof i32 %a, 8 } define i32 @fun3(i32 %a) { - %r = sdiv i32 %a, -16 + %r = sdiv nof i32 %a, -16 ret i32 %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv i32 %a, -16 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof i32 %a, -16 } define i16 @fun4(i16 %a) { - %r = sdiv i16 %a, 32 + %r = sdiv nof i16 %a, 32 ret i16 %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv i16 %a, 32 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof i16 %a, 32 } define i16 @fun5(i16 %a) { - %r = sdiv i16 %a, -64 + %r = sdiv nof i16 %a, -64 ret i16 %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv i16 %a, -64 +; CHECK: Cost Model: Found an estimated cost of 4 for 
instruction: %r = sdiv nof i16 %a, -64 } define i8 @fun6(i8 %a) { - %r = sdiv i8 %a, 64 + %r = sdiv nof i8 %a, 64 ret i8 %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv i8 %a, 64 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof i8 %a, 64 } define i8 @fun7(i8 %a) { - %r = sdiv i8 %a, -128 + %r = sdiv nof i8 %a, -128 ret i8 %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv i8 %a, -128 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof i8 %a, -128 } ; Vector sdiv define <2 x i64> @fun8(<2 x i64> %a) { - %r = sdiv <2 x i64> %a, <i64 2, i64 2> + %r = sdiv nof <2 x i64> %a, <i64 2, i64 2> ret <2 x i64> %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv <2 x i64> %a, <i64 2, i64 2> +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof <2 x i64> %a, <i64 2, i64 2> } define <2 x i64> @fun9(<2 x i64> %a) { - %r = sdiv <2 x i64> %a, <i64 -4, i64 -4> + %r = sdiv nof <2 x i64> %a, <i64 -4, i64 -4> ret <2 x i64> %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv <2 x i64> %a, <i64 -4, i64 -4> +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof <2 x i64> %a, <i64 -4, i64 -4> } define <4 x i32> @fun10(<4 x i32> %a) { - %r = sdiv <4 x i32> %a, <i32 8, i32 8, i32 8, i32 8> + %r = sdiv nof <4 x i32> %a, <i32 8, i32 8, i32 8, i32 8> ret <4 x i32> %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv <4 x i32> %a, <i32 8, i32 8, i32 8, i32 8> +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof <4 x i32> %a, <i32 8, i32 8, i32 8, i32 8> } define <4 x i32> @fun11(<4 x i32> %a) { - %r = sdiv <4 x i32> %a, <i32 -16, i32 -16, i32 -16, i32 -16> + %r = sdiv nof <4 x i32> %a, <i32 -16, i32 -16, i32 -16, i32 -16> ret <4 x i32> %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: 
%r = sdiv <4 x i32> %a, <i32 -16 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof <4 x i32> %a, <i32 -16 } define <8 x i16> @fun12(<8 x i16> %a) { - %r = sdiv <8 x i16> %a, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32> + %r = sdiv nof <8 x i16> %a, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32> ret <8 x i16> %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv <8 x i16> %a, <i16 32 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof <8 x i16> %a, <i16 32 } define <8 x i16> @fun13(<8 x i16> %a) { - %r = sdiv <8 x i16> %a, <i16 -64, i16 -64, i16 -64, i16 -64, i16 -64, i16 -64, i16 -64, i16 -64> + %r = sdiv nof <8 x i16> %a, <i16 -64, i16 -64, i16 -64, i16 -64, i16 -64, i16 -64, i16 -64, i16 -64> ret <8 x i16> %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv <8 x i16> %a, <i16 -64 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof <8 x i16> %a, <i16 -64 } define <16 x i8> @fun14(<16 x i8> %a) { - %r = sdiv <16 x i8> %a, <i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64> + %r = sdiv nof <16 x i8> %a, <i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64, i8 64> ret <16 x i8> %r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv <16 x i8> %a, <i8 64 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof <16 x i8> %a, <i8 64 } define <16 x i8> @fun15(<16 x i8> %a) { - %r = sdiv <16 x i8> %a, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128> + %r = sdiv nof <16 x i8> %a, <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128> ret <16 x i8> 
%r -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv <16 x i8> %a, <i8 -128 +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %r = sdiv nof <16 x i8> %a, <i8 -128 } ; Scalar udiv define i64 @fun16(i64 %a) { - %r = udiv i64 %a, 2 + %r = udiv nof i64 %a, 2 ret i64 %r -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv i64 %a, 2 +; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv nof i64 %a, 2 } define i32 @fun17(i32 %a) { - %r = udiv i32 %a, 8 + %r = udiv nof i32 %a, 8 ret i32 %r -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv i32 %a, 8 +; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv nof i32 %a, 8 } define i16 @fun18(i16 %a) { - %r = udiv i16 %a, 32 + %r = udiv nof i16 %a, 32 ret i16 %r -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv i16 %a, 32 +; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv nof i16 %a, 32 } define i8 @fun19(i8 %a) { - %r = udiv i8 %a, 128 + %r = udiv nof i8 %a, 128 ret i8 %r -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv i8 %a, -128 +; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv nof i8 %a, -128 } ; Vector udiv define <2 x i64> @fun20(<2 x i64> %a) { - %r = udiv <2 x i64> %a, <i64 2, i64 2> + %r = udiv nof <2 x i64> %a, <i64 2, i64 2> ret <2 x i64> %r -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv <2 x i64> %a, <i64 2 +; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv nof <2 x i64> %a, <i64 2 } define <4 x i32> @fun21(<4 x i32> %a) { - %r = udiv <4 x i32> %a, <i32 8, i32 8, i32 8, i32 8> + %r = udiv nof <4 x i32> %a, <i32 8, i32 8, i32 8, i32 8> ret <4 x i32> %r -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv <4 x i32> %a, <i32 8 +; CHECK: Cost Model: Found an estimated cost of 1 for 
instruction: %r = udiv nof <4 x i32> %a, <i32 8 } define <8 x i16> @fun22(<8 x i16> %a) { - %r = udiv <8 x i16> %a, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32> + %r = udiv nof <8 x i16> %a, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32> ret <8 x i16> %r -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv <8 x i16> %a, <i16 32 +; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv nof <8 x i16> %a, <i16 32 } define <16 x i8> @fun23(<16 x i8> %a) { - %r = udiv <16 x i8> %a, <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128> + %r = udiv nof <16 x i8> %a, <i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128, i8 128> ret <16 x i8> %r -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv <16 x i8> %a, <i8 -128 +; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %r = udiv nof <16 x i8> %a, <i8 -128 } Index: test/Analysis/CostModel/SystemZ/int-arith.ll =================================================================== --- test/Analysis/CostModel/SystemZ/int-arith.ll +++ test/Analysis/CostModel/SystemZ/int-arith.ll @@ -142,47 +142,47 @@ } define void @sdiv() { - %res0 = sdiv i8 undef, undef - %res1 = sdiv i16 undef, undef - %res2 = sdiv i32 undef, undef - %res3 = sdiv i64 undef, undef - %res4 = sdiv <2 x i8> undef, undef - %res5 = sdiv <2 x i16> undef, undef - %res6 = sdiv <2 x i32> undef, undef - %res7 = sdiv <2 x i64> undef, undef - %res8 = sdiv <4 x i8> undef, undef - %res9 = sdiv <4 x i16> undef, undef - %res10 = sdiv <4 x i32> undef, undef - %res11 = sdiv <4 x i64> undef, undef - %res12 = sdiv <8 x i8> undef, undef - %res13 = sdiv <8 x i16> undef, undef - %res14 = sdiv <8 x i32> undef, undef - %res15 = sdiv <8 x i64> undef, undef - %res16 = sdiv <16 x i8> undef, undef - %res17 = sdiv <16 x i16> 
undef, undef - %res18 = sdiv <16 x i32> undef, undef - %res19 = sdiv <16 x i64> undef, undef - -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res0 = sdiv i8 undef, undef -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res1 = sdiv i16 undef, undef -; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res2 = sdiv i32 undef, undef -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = sdiv i64 undef, undef -; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res4 = sdiv <2 x i8> undef, undef -; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res5 = sdiv <2 x i16> undef, undef -; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res6 = sdiv <2 x i32> undef, undef -; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %res7 = sdiv <2 x i64> undef, undef -; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res8 = sdiv <4 x i8> undef, undef -; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res9 = sdiv <4 x i16> undef, undef -; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res10 = sdiv <4 x i32> undef, undef -; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res11 = sdiv <4 x i64> undef, undef -; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res12 = sdiv <8 x i8> undef, undef -; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res13 = sdiv <8 x i16> undef, undef -; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res14 = sdiv <8 x i32> undef, undef -; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res15 = sdiv <8 x i64> undef, undef -; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res16 = sdiv <16 x i8> undef, undef -; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res17 = sdiv <16 x i16> undef, undef -; CHECK: Cost Model: Found an estimated 
cost of 48 for instruction: %res18 = sdiv <16 x i32> undef, undef -; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res19 = sdiv <16 x i64> undef, undef + %res0 = sdiv nof i8 undef, undef + %res1 = sdiv nof i16 undef, undef + %res2 = sdiv nof i32 undef, undef + %res3 = sdiv nof i64 undef, undef + %res4 = sdiv nof <2 x i8> undef, undef + %res5 = sdiv nof <2 x i16> undef, undef + %res6 = sdiv nof <2 x i32> undef, undef + %res7 = sdiv nof <2 x i64> undef, undef + %res8 = sdiv nof <4 x i8> undef, undef + %res9 = sdiv nof <4 x i16> undef, undef + %res10 = sdiv nof <4 x i32> undef, undef + %res11 = sdiv nof <4 x i64> undef, undef + %res12 = sdiv nof <8 x i8> undef, undef + %res13 = sdiv nof <8 x i16> undef, undef + %res14 = sdiv nof <8 x i32> undef, undef + %res15 = sdiv nof <8 x i64> undef, undef + %res16 = sdiv nof <16 x i8> undef, undef + %res17 = sdiv nof <16 x i16> undef, undef + %res18 = sdiv nof <16 x i32> undef, undef + %res19 = sdiv nof <16 x i64> undef, undef + +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res0 = sdiv nof i8 undef, undef +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res1 = sdiv nof i16 undef, undef +; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res2 = sdiv nof i32 undef, undef +; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %res3 = sdiv nof i64 undef, undef +; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res4 = sdiv nof <2 x i8> undef, undef +; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res5 = sdiv nof <2 x i16> undef, undef +; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res6 = sdiv nof <2 x i32> undef, undef +; CHECK: Cost Model: Found an estimated cost of 3 for instruction: %res7 = sdiv nof <2 x i64> undef, undef +; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res8 = sdiv nof <4 x i8> undef, undef +; CHECK: Cost Model: Found an estimated cost of 20 
for instruction: %res9 = sdiv nof <4 x i16> undef, undef +; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res10 = sdiv nof <4 x i32> undef, undef +; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res11 = sdiv nof <4 x i64> undef, undef +; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res12 = sdiv nof <8 x i8> undef, undef +; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res13 = sdiv nof <8 x i16> undef, undef +; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res14 = sdiv nof <8 x i32> undef, undef +; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res15 = sdiv nof <8 x i64> undef, undef +; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res16 = sdiv nof <16 x i8> undef, undef +; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res17 = sdiv nof <16 x i16> undef, undef +; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %res18 = sdiv nof <16 x i32> undef, undef +; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res19 = sdiv nof <16 x i64> undef, undef ret void; } @@ -234,47 +234,47 @@ } define void @udiv() { - %res0 = udiv i8 undef, undef - %res1 = udiv i16 undef, undef - %res2 = udiv i32 undef, undef - %res3 = udiv i64 undef, undef - %res4 = udiv <2 x i8> undef, undef - %res5 = udiv <2 x i16> undef, undef - %res6 = udiv <2 x i32> undef, undef - %res7 = udiv <2 x i64> undef, undef - %res8 = udiv <4 x i8> undef, undef - %res9 = udiv <4 x i16> undef, undef - %res10 = udiv <4 x i32> undef, undef - %res11 = udiv <4 x i64> undef, undef - %res12 = udiv <8 x i8> undef, undef - %res13 = udiv <8 x i16> undef, undef - %res14 = udiv <8 x i32> undef, undef - %res15 = udiv <8 x i64> undef, undef - %res16 = udiv <16 x i8> undef, undef - %res17 = udiv <16 x i16> undef, undef - %res18 = udiv <16 x i32> undef, undef - %res19 = udiv <16 x i64> undef, undef - -; CHECK: Cost Model: Found an 
estimated cost of 4 for instruction: %res0 = udiv i8 undef, undef -; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res1 = udiv i16 undef, undef -; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res2 = udiv i32 undef, undef -; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res3 = udiv i64 undef, undef -; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res4 = udiv <2 x i8> undef, undef -; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res5 = udiv <2 x i16> undef, undef -; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res6 = udiv <2 x i32> undef, undef -; CHECK: Cost Model: Found an estimated cost of 5 for instruction: %res7 = udiv <2 x i64> undef, undef -; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res8 = udiv <4 x i8> undef, undef -; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res9 = udiv <4 x i16> undef, undef -; CHECK: Cost Model: Found an estimated cost of 12 for instruction: %res10 = udiv <4 x i32> undef, undef -; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res11 = udiv <4 x i64> undef, undef -; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res12 = udiv <8 x i8> undef, undef -; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res13 = udiv <8 x i16> undef, undef -; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res14 = udiv <8 x i32> undef, undef -; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res15 = udiv <8 x i64> undef, undef -; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res16 = udiv <16 x i8> undef, undef -; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res17 = udiv <16 x i16> undef, undef -; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %res18 = udiv <16 x i32> undef, undef -; CHECK: Cost Model: Found an estimated cost of 40 for 
instruction: %res19 = udiv <16 x i64> undef, undef + %res0 = udiv nof i8 undef, undef + %res1 = udiv nof i16 undef, undef + %res2 = udiv nof i32 undef, undef + %res3 = udiv nof i64 undef, undef + %res4 = udiv nof <2 x i8> undef, undef + %res5 = udiv nof <2 x i16> undef, undef + %res6 = udiv nof <2 x i32> undef, undef + %res7 = udiv nof <2 x i64> undef, undef + %res8 = udiv nof <4 x i8> undef, undef + %res9 = udiv nof <4 x i16> undef, undef + %res10 = udiv nof <4 x i32> undef, undef + %res11 = udiv nof <4 x i64> undef, undef + %res12 = udiv nof <8 x i8> undef, undef + %res13 = udiv nof <8 x i16> undef, undef + %res14 = udiv nof <8 x i32> undef, undef + %res15 = udiv nof <8 x i64> undef, undef + %res16 = udiv nof <16 x i8> undef, undef + %res17 = udiv nof <16 x i16> undef, undef + %res18 = udiv nof <16 x i32> undef, undef + %res19 = udiv nof <16 x i64> undef, undef + +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res0 = udiv nof i8 undef, undef +; CHECK: Cost Model: Found an estimated cost of 4 for instruction: %res1 = udiv nof i16 undef, undef +; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res2 = udiv nof i32 undef, undef +; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %res3 = udiv nof i64 undef, undef +; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res4 = udiv nof <2 x i8> undef, undef +; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res5 = udiv nof <2 x i16> undef, undef +; CHECK: Cost Model: Found an estimated cost of 6 for instruction: %res6 = udiv nof <2 x i32> undef, undef +; CHECK: Cost Model: Found an estimated cost of 5 for instruction: %res7 = udiv nof <2 x i64> undef, undef +; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res8 = udiv nof <4 x i8> undef, undef +; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res9 = udiv nof <4 x i16> undef, undef +; CHECK: Cost Model: Found an estimated cost of 12 for 
instruction: %res10 = udiv nof <4 x i32> undef, undef +; CHECK: Cost Model: Found an estimated cost of 10 for instruction: %res11 = udiv nof <4 x i64> undef, undef +; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res12 = udiv nof <8 x i8> undef, undef +; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res13 = udiv nof <8 x i16> undef, undef +; CHECK: Cost Model: Found an estimated cost of 24 for instruction: %res14 = udiv nof <8 x i32> undef, undef +; CHECK: Cost Model: Found an estimated cost of 20 for instruction: %res15 = udiv nof <8 x i64> undef, undef +; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res16 = udiv nof <16 x i8> undef, undef +; CHECK: Cost Model: Found an estimated cost of 80 for instruction: %res17 = udiv nof <16 x i16> undef, undef +; CHECK: Cost Model: Found an estimated cost of 48 for instruction: %res18 = udiv nof <16 x i32> undef, undef +; CHECK: Cost Model: Found an estimated cost of 40 for instruction: %res19 = udiv nof <16 x i64> undef, undef ret void; } Index: test/Analysis/CostModel/SystemZ/memop-folding-int-arith.ll =================================================================== --- test/Analysis/CostModel/SystemZ/memop-folding-int-arith.ll +++ test/Analysis/CostModel/SystemZ/memop-folding-int-arith.ll @@ -92,58 +92,58 @@ define void @sdiv() { %li32 = load i32, i32* undef - sdiv i32 %li32, undef + sdiv nof i32 %li32, undef %li32_0 = load i32, i32* undef %li32_1 = load i32, i32* undef - sdiv i32 %li32_0, %li32_1 + sdiv nof i32 %li32_0, %li32_1 %li64 = load i64, i64* undef - sdiv i64 %li64, undef + sdiv nof i64 %li64, undef %li64_0 = load i64, i64* undef %li64_1 = load i64, i64* undef - sdiv i64 %li64_0, %li64_1 + sdiv nof i64 %li64_0, %li64_1 ret void; ; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef -; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %1 = sdiv i32 %li32, undef +; CHECK: Cost Model: Found an 
estimated cost of 2 for instruction: %1 = sdiv nof i32 %li32, undef ; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef ; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef -; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %2 = sdiv i32 %li32_0, %li32_1 +; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %2 = sdiv nof i32 %li32_0, %li32_1 ; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = sdiv i64 %li64, undef +; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %3 = sdiv nof i64 %li64, undef ; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef ; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef -; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = sdiv i64 %li64_0, %li64_1 +; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %4 = sdiv nof i64 %li64_0, %li64_1 } define void @udiv() { %li32 = load i32, i32* undef - udiv i32 %li32, undef + udiv nof i32 %li32, undef %li32_0 = load i32, i32* undef %li32_1 = load i32, i32* undef - udiv i32 %li32_0, %li32_1 + udiv nof i32 %li32_0, %li32_1 %li64 = load i64, i64* undef - udiv i64 %li64, undef + udiv nof i64 %li64, undef %li64_0 = load i64, i64* undef %li64_1 = load i64, i64* undef - udiv i64 %li64_0, %li64_1 + udiv nof i64 %li64_0, %li64_1 ret void; ; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef -; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %1 = udiv i32 %li32, undef +; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %1 = udiv nof i32 %li32, undef ; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef ; CHECK: Cost 
Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef -; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %2 = udiv i32 %li32_0, %li32_1 +; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %2 = udiv nof i32 %li32_0, %li32_1 ; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64 = load i64, i64* undef -; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %3 = udiv i64 %li64, undef +; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %3 = udiv nof i64 %li64, undef ; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li64_0 = load i64, i64* undef ; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li64_1 = load i64, i64* undef -; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %4 = udiv i64 %li64_0, %li64_1 +; CHECK: Cost Model: Found an estimated cost of 2 for instruction: %4 = udiv nof i64 %li64_0, %li64_1 } define void @and() { Index: test/Analysis/CostModel/X86/div.ll =================================================================== --- test/Analysis/CostModel/X86/div.ll +++ test/Analysis/CostModel/X86/div.ll @@ -12,52 +12,52 @@ ; CHECK-LABEL: 'sdiv' define i32 @sdiv() { ; CHECK: cost of 1 {{.*}} %I64 = sdiv - %I64 = sdiv i64 undef, undef + %I64 = sdiv nof i64 undef, undef ; SSE: cost of 40 {{.*}} %V2i64 = sdiv ; AVX: cost of 40 {{.*}} %V2i64 = sdiv - %V2i64 = sdiv <2 x i64> undef, undef + %V2i64 = sdiv nof <2 x i64> undef, undef ; SSE: cost of 80 {{.*}} %V4i64 = sdiv ; AVX: cost of 80 {{.*}} %V4i64 = sdiv - %V4i64 = sdiv <4 x i64> undef, undef + %V4i64 = sdiv nof <4 x i64> undef, undef ; SSE: cost of 160 {{.*}} %V8i64 = sdiv ; AVX: cost of 160 {{.*}} %V8i64 = sdiv - %V8i64 = sdiv <8 x i64> undef, undef + %V8i64 = sdiv nof <8 x i64> undef, undef ; CHECK: cost of 1 {{.*}} %I32 = sdiv - %I32 = sdiv i32 undef, undef + %I32 = sdiv nof i32 undef, undef ; SSE: cost of 80 {{.*}} %V4i32 = sdiv ; AVX: cost of 80 
{{.*}} %V4i32 = sdiv - %V4i32 = sdiv <4 x i32> undef, undef + %V4i32 = sdiv nof <4 x i32> undef, undef ; SSE: cost of 160 {{.*}} %V8i32 = sdiv ; AVX: cost of 160 {{.*}} %V8i32 = sdiv - %V8i32 = sdiv <8 x i32> undef, undef + %V8i32 = sdiv nof <8 x i32> undef, undef ; SSE: cost of 320 {{.*}} %V16i32 = sdiv ; AVX: cost of 320 {{.*}} %V16i32 = sdiv - %V16i32 = sdiv <16 x i32> undef, undef + %V16i32 = sdiv nof <16 x i32> undef, undef ; CHECK: cost of 1 {{.*}} %I16 = sdiv - %I16 = sdiv i16 undef, undef + %I16 = sdiv nof i16 undef, undef ; SSE: cost of 160 {{.*}} %V8i16 = sdiv ; AVX: cost of 160 {{.*}} %V8i16 = sdiv - %V8i16 = sdiv <8 x i16> undef, undef + %V8i16 = sdiv nof <8 x i16> undef, undef ; SSE: cost of 320 {{.*}} %V16i16 = sdiv ; AVX: cost of 320 {{.*}} %V16i16 = sdiv - %V16i16 = sdiv <16 x i16> undef, undef + %V16i16 = sdiv nof <16 x i16> undef, undef ; SSE: cost of 640 {{.*}} %V32i16 = sdiv ; AVX: cost of 640 {{.*}} %V32i16 = sdiv - %V32i16 = sdiv <32 x i16> undef, undef + %V32i16 = sdiv nof <32 x i16> undef, undef ; CHECK: cost of 1 {{.*}} %I8 = sdiv - %I8 = sdiv i8 undef, undef + %I8 = sdiv nof i8 undef, undef ; SSE: cost of 320 {{.*}} %V16i8 = sdiv ; AVX: cost of 320 {{.*}} %V16i8 = sdiv - %V16i8 = sdiv <16 x i8> undef, undef + %V16i8 = sdiv nof <16 x i8> undef, undef ; SSE: cost of 640 {{.*}} %V32i8 = sdiv ; AVX: cost of 640 {{.*}} %V32i8 = sdiv - %V32i8 = sdiv <32 x i8> undef, undef + %V32i8 = sdiv nof <32 x i8> undef, undef ; SSE: cost of 1280 {{.*}} %V64i8 = sdiv ; AVX: cost of 1280 {{.*}} %V64i8 = sdiv - %V64i8 = sdiv <64 x i8> undef, undef + %V64i8 = sdiv nof <64 x i8> undef, undef ret i32 undef } @@ -65,52 +65,52 @@ ; CHECK-LABEL: 'udiv' define i32 @udiv() { ; CHECK: cost of 1 {{.*}} %I64 = udiv - %I64 = udiv i64 undef, undef + %I64 = udiv nof i64 undef, undef ; SSE: cost of 40 {{.*}} %V2i64 = udiv ; AVX: cost of 40 {{.*}} %V2i64 = udiv - %V2i64 = udiv <2 x i64> undef, undef + %V2i64 = udiv nof <2 x i64> undef, undef ; SSE: cost of 80 {{.*}} %V4i64 = 
udiv ; AVX: cost of 80 {{.*}} %V4i64 = udiv - %V4i64 = udiv <4 x i64> undef, undef + %V4i64 = udiv nof <4 x i64> undef, undef ; SSE: cost of 160 {{.*}} %V8i64 = udiv ; AVX: cost of 160 {{.*}} %V8i64 = udiv - %V8i64 = udiv <8 x i64> undef, undef + %V8i64 = udiv nof <8 x i64> undef, undef ; CHECK: cost of 1 {{.*}} %I32 = udiv - %I32 = udiv i32 undef, undef + %I32 = udiv nof i32 undef, undef ; SSE: cost of 80 {{.*}} %V4i32 = udiv ; AVX: cost of 80 {{.*}} %V4i32 = udiv - %V4i32 = udiv <4 x i32> undef, undef + %V4i32 = udiv nof <4 x i32> undef, undef ; SSE: cost of 160 {{.*}} %V8i32 = udiv ; AVX: cost of 160 {{.*}} %V8i32 = udiv - %V8i32 = udiv <8 x i32> undef, undef + %V8i32 = udiv nof <8 x i32> undef, undef ; SSE: cost of 320 {{.*}} %V16i32 = udiv ; AVX: cost of 320 {{.*}} %V16i32 = udiv - %V16i32 = udiv <16 x i32> undef, undef + %V16i32 = udiv nof <16 x i32> undef, undef ; CHECK: cost of 1 {{.*}} %I16 = udiv - %I16 = udiv i16 undef, undef + %I16 = udiv nof i16 undef, undef ; SSE: cost of 160 {{.*}} %V8i16 = udiv ; AVX: cost of 160 {{.*}} %V8i16 = udiv - %V8i16 = udiv <8 x i16> undef, undef + %V8i16 = udiv nof <8 x i16> undef, undef ; SSE: cost of 320 {{.*}} %V16i16 = udiv ; AVX: cost of 320 {{.*}} %V16i16 = udiv - %V16i16 = udiv <16 x i16> undef, undef + %V16i16 = udiv nof <16 x i16> undef, undef ; SSE: cost of 640 {{.*}} %V32i16 = udiv ; AVX: cost of 640 {{.*}} %V32i16 = udiv - %V32i16 = udiv <32 x i16> undef, undef + %V32i16 = udiv nof <32 x i16> undef, undef ; CHECK: cost of 1 {{.*}} %I8 = udiv - %I8 = udiv i8 undef, undef + %I8 = udiv nof i8 undef, undef ; SSE: cost of 320 {{.*}} %V16i8 = udiv ; AVX: cost of 320 {{.*}} %V16i8 = udiv - %V16i8 = udiv <16 x i8> undef, undef + %V16i8 = udiv nof <16 x i8> undef, undef ; SSE: cost of 640 {{.*}} %V32i8 = udiv ; AVX: cost of 640 {{.*}} %V32i8 = udiv - %V32i8 = udiv <32 x i8> undef, undef + %V32i8 = udiv nof <32 x i8> undef, undef ; SSE: cost of 1280 {{.*}} %V64i8 = udiv ; AVX: cost of 1280 {{.*}} %V64i8 = udiv - %V64i8 = 
udiv <64 x i8> undef, undef + %V64i8 = udiv nof <64 x i8> undef, undef ret i32 undef } @@ -118,67 +118,67 @@ ; CHECK-LABEL: 'sdiv_uniformconst' define i32 @sdiv_uniformconst() { ; CHECK: cost of 1 {{.*}} %I64 = sdiv - %I64 = sdiv i64 undef, 7 + %I64 = sdiv nof i64 undef, 7 ; SSE: cost of 40 {{.*}} %V2i64 = sdiv ; AVX: cost of 40 {{.*}} %V2i64 = sdiv - %V2i64 = sdiv <2 x i64> undef, <i64 7, i64 7> + %V2i64 = sdiv nof <2 x i64> undef, <i64 7, i64 7> ; SSE: cost of 80 {{.*}} %V4i64 = sdiv ; AVX: cost of 80 {{.*}} %V4i64 = sdiv - %V4i64 = sdiv <4 x i64> undef, <i64 7, i64 7, i64 7, i64 7> + %V4i64 = sdiv nof <4 x i64> undef, <i64 7, i64 7, i64 7, i64 7> ; SSE: cost of 160 {{.*}} %V8i64 = sdiv ; AVX: cost of 160 {{.*}} %V8i64 = sdiv - %V8i64 = sdiv <8 x i64> undef, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7> + %V8i64 = sdiv nof <8 x i64> undef, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7> ; CHECK: cost of 1 {{.*}} %I32 = sdiv - %I32 = sdiv i32 undef, 7 + %I32 = sdiv nof i32 undef, 7 ; SSE2: cost of 19 {{.*}} %V4i32 = sdiv ; SSSE3: cost of 19 {{.*}} %V4i32 = sdiv ; SSE42: cost of 15 {{.*}} %V4i32 = sdiv ; AVX: cost of 15 {{.*}} %V4i32 = sdiv - %V4i32 = sdiv <4 x i32> undef, <i32 7, i32 7, i32 7, i32 7> + %V4i32 = sdiv nof <4 x i32> undef, <i32 7, i32 7, i32 7, i32 7> ; SSE2: cost of 38 {{.*}} %V8i32 = sdiv ; SSSE3: cost of 38 {{.*}} %V8i32 = sdiv ; SSE42: cost of 30 {{.*}} %V8i32 = sdiv ; AVX1: cost of 32 {{.*}} %V8i32 = sdiv ; AVX2: cost of 15 {{.*}} %V8i32 = sdiv ; AVX512: cost of 15 {{.*}} %V8i32 = sdiv - %V8i32 = sdiv <8 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> + %V8i32 = sdiv nof <8 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> ; SSE2: cost of 76 {{.*}} %V16i32 = sdiv ; SSSE3: cost of 76 {{.*}} %V16i32 = sdiv ; SSE42: cost of 60 {{.*}} %V16i32 = sdiv ; AVX1: cost of 64 {{.*}} %V16i32 = sdiv ; AVX2: cost of 30 {{.*}} %V16i32 = sdiv ; AVX512: cost of 15 {{.*}} %V16i32 = sdiv - %V16i32 = 
sdiv <16 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> + %V16i32 = sdiv nof <16 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> ; CHECK: cost of 1 {{.*}} %I16 = sdiv - %I16 = sdiv i16 undef, 7 + %I16 = sdiv nof i16 undef, 7 ; SSE: cost of 6 {{.*}} %V8i16 = sdiv ; AVX: cost of 6 {{.*}} %V8i16 = sdiv - %V8i16 = sdiv <8 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %V8i16 = sdiv nof <8 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ; SSE: cost of 12 {{.*}} %V16i16 = sdiv ; AVX1: cost of 14 {{.*}} %V16i16 = sdiv ; AVX2: cost of 6 {{.*}} %V16i16 = sdiv ; AVX512: cost of 6 {{.*}} %V16i16 = sdiv - %V16i16 = sdiv <16 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %V16i16 = sdiv nof <16 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ; SSE: cost of 24 {{.*}} %V32i16 = sdiv ; AVX1: cost of 28 {{.*}} %V32i16 = sdiv ; AVX2: cost of 12 {{.*}} %V32i16 = sdiv ; AVX512F: cost of 12 {{.*}} %V32i16 = sdiv ; AVX512BW: cost of 6 {{.*}} %V32i16 = sdiv - %V32i16 = sdiv <32 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %V32i16 = sdiv nof <32 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ; CHECK: cost of 1 {{.*}} %I8 = sdiv - %I8 = sdiv i8 undef, 7 + %I8 = sdiv nof i8 undef, 7 ; SSE: cost of 320 {{.*}} %V16i8 = sdiv ; AVX: cost of 320 {{.*}} 
%V16i8 = sdiv - %V16i8 = sdiv <16 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> + %V16i8 = sdiv nof <16 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> ; SSE: cost of 640 {{.*}} %V32i8 = sdiv ; AVX: cost of 640 {{.*}} %V32i8 = sdiv - %V32i8 = sdiv <32 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> + %V32i8 = sdiv nof <32 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> ; SSE: cost of 1280 {{.*}} %V64i8 = sdiv ; AVX: cost of 1280 {{.*}} %V64i8 = sdiv - %V64i8 = sdiv <64 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> + %V64i8 = sdiv nof <64 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> ret i32 undef } @@ -186,61 +186,61 @@ ; CHECK-LABEL: 'udiv_uniformconst' define i32 @udiv_uniformconst() { ; CHECK: cost of 1 {{.*}} %I64 = udiv - %I64 = udiv i64 undef, 7 + %I64 = udiv nof i64 undef, 7 ; SSE: cost of 40 {{.*}} %V2i64 = udiv ; AVX: cost of 40 
{{.*}} %V2i64 = udiv - %V2i64 = udiv <2 x i64> undef, <i64 7, i64 7> + %V2i64 = udiv nof <2 x i64> undef, <i64 7, i64 7> ; SSE: cost of 80 {{.*}} %V4i64 = udiv ; AVX: cost of 80 {{.*}} %V4i64 = udiv - %V4i64 = udiv <4 x i64> undef, <i64 7, i64 7, i64 7, i64 7> + %V4i64 = udiv nof <4 x i64> undef, <i64 7, i64 7, i64 7, i64 7> ; SSE: cost of 160 {{.*}} %V8i64 = udiv ; AVX: cost of 160 {{.*}} %V8i64 = udiv - %V8i64 = udiv <8 x i64> undef, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7> + %V8i64 = udiv nof <8 x i64> undef, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7> ; CHECK: cost of 1 {{.*}} %I32 = udiv - %I32 = udiv i32 undef, 7 + %I32 = udiv nof i32 undef, 7 ; SSE: cost of 15 {{.*}} %V4i32 = udiv ; AVX: cost of 15 {{.*}} %V4i32 = udiv - %V4i32 = udiv <4 x i32> undef, <i32 7, i32 7, i32 7, i32 7> + %V4i32 = udiv nof <4 x i32> undef, <i32 7, i32 7, i32 7, i32 7> ; SSE: cost of 30 {{.*}} %V8i32 = udiv ; AVX1: cost of 32 {{.*}} %V8i32 = udiv ; AVX2: cost of 15 {{.*}} %V8i32 = udiv ; AVX512: cost of 15 {{.*}} %V8i32 = udiv - %V8i32 = udiv <8 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> + %V8i32 = udiv nof <8 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> ; SSE: cost of 60 {{.*}} %V16i32 = udiv ; AVX1: cost of 64 {{.*}} %V16i32 = udiv ; AVX2: cost of 30 {{.*}} %V16i32 = udiv ; AVX512: cost of 15 {{.*}} %V16i32 = udiv - %V16i32 = udiv <16 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> + %V16i32 = udiv nof <16 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> ; CHECK: cost of 1 {{.*}} %I16 = udiv - %I16 = udiv i16 undef, 7 + %I16 = udiv nof i16 undef, 7 ; SSE: cost of 6 {{.*}} %V8i16 = udiv ; AVX: cost of 6 {{.*}} %V8i16 = udiv - %V8i16 = udiv <8 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %V8i16 = udiv nof <8 x i16> 
undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ; SSE: cost of 12 {{.*}} %V16i16 = udiv ; AVX1: cost of 14 {{.*}} %V16i16 = udiv ; AVX2: cost of 6 {{.*}} %V16i16 = udiv ; AVX512: cost of 6 {{.*}} %V16i16 = udiv - %V16i16 = udiv <16 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %V16i16 = udiv nof <16 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ; SSE: cost of 24 {{.*}} %V32i16 = udiv ; AVX1: cost of 28 {{.*}} %V32i16 = udiv ; AVX2: cost of 12 {{.*}} %V32i16 = udiv ; AVX512F: cost of 12 {{.*}} %V32i16 = udiv ; AVX512BW: cost of 6 {{.*}} %V32i16 = udiv - %V32i16 = udiv <32 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %V32i16 = udiv nof <32 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ; CHECK: cost of 1 {{.*}} %I8 = udiv - %I8 = udiv i8 undef, 7 + %I8 = udiv nof i8 undef, 7 ; SSE: cost of 320 {{.*}} %V16i8 = udiv ; AVX: cost of 320 {{.*}} %V16i8 = udiv - %V16i8 = udiv <16 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> + %V16i8 = udiv nof <16 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> ; SSE: cost of 640 {{.*}} %V32i8 = udiv ; AVX: cost of 640 {{.*}} %V32i8 = udiv - %V32i8 = udiv <32 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 
7> + %V32i8 = udiv nof <32 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> ; SSE: cost of 1280 {{.*}} %V64i8 = udiv ; AVX: cost of 1280 {{.*}} %V64i8 = udiv - %V64i8 = udiv <64 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> + %V64i8 = udiv nof <64 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> ret i32 undef } @@ -248,67 +248,67 @@ ; CHECK-LABEL: 'sdiv_uniformconstpow2' define i32 @sdiv_uniformconstpow2() { ; CHECK: cost of 1 {{.*}} %I64 = sdiv - %I64 = sdiv i64 undef, 16 + %I64 = sdiv nof i64 undef, 16 ; SSE: cost of 40 {{.*}} %V2i64 = sdiv ; AVX: cost of 40 {{.*}} %V2i64 = sdiv - %V2i64 = sdiv <2 x i64> undef, <i64 16, i64 16> + %V2i64 = sdiv nof <2 x i64> undef, <i64 16, i64 16> ; SSE: cost of 80 {{.*}} %V4i64 = sdiv ; AVX: cost of 80 {{.*}} %V4i64 = sdiv - %V4i64 = sdiv <4 x i64> undef, <i64 16, i64 16, i64 16, i64 16> + %V4i64 = sdiv nof <4 x i64> undef, <i64 16, i64 16, i64 16, i64 16> ; SSE: cost of 160 {{.*}} %V8i64 = sdiv ; AVX: cost of 160 {{.*}} %V8i64 = sdiv - %V8i64 = sdiv <8 x i64> undef, <i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16> + %V8i64 = sdiv nof <8 x i64> undef, <i64 16, i64 16, i64 
16, i64 16, i64 16, i64 16, i64 16, i64 16> ; CHECK: cost of 1 {{.*}} %I32 = sdiv - %I32 = sdiv i32 undef, 16 + %I32 = sdiv nof i32 undef, 16 ; SSE2: cost of 19 {{.*}} %V4i32 = sdiv ; SSSE3: cost of 19 {{.*}} %V4i32 = sdiv ; SSE42: cost of 15 {{.*}} %V4i32 = sdiv ; AVX: cost of 15 {{.*}} %V4i32 = sdiv - %V4i32 = sdiv <4 x i32> undef, <i32 16, i32 16, i32 16, i32 16> + %V4i32 = sdiv nof <4 x i32> undef, <i32 16, i32 16, i32 16, i32 16> ; SSE2: cost of 38 {{.*}} %V8i32 = sdiv ; SSSE3: cost of 38 {{.*}} %V8i32 = sdiv ; SSE42: cost of 30 {{.*}} %V8i32 = sdiv ; AVX1: cost of 32 {{.*}} %V8i32 = sdiv ; AVX2: cost of 15 {{.*}} %V8i32 = sdiv ; AVX512: cost of 15 {{.*}} %V8i32 = sdiv - %V8i32 = sdiv <8 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> + %V8i32 = sdiv nof <8 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> ; SSE2: cost of 76 {{.*}} %V16i32 = sdiv ; SSSE3: cost of 76 {{.*}} %V16i32 = sdiv ; SSE42: cost of 60 {{.*}} %V16i32 = sdiv ; AVX1: cost of 64 {{.*}} %V16i32 = sdiv ; AVX2: cost of 30 {{.*}} %V16i32 = sdiv ; AVX512: cost of 15 {{.*}} %V16i32 = sdiv - %V16i32 = sdiv <16 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> + %V16i32 = sdiv nof <16 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> ; CHECK: cost of 1 {{.*}} %I16 = sdiv - %I16 = sdiv i16 undef, 16 + %I16 = sdiv nof i16 undef, 16 ; SSE: cost of 6 {{.*}} %V8i16 = sdiv ; AVX: cost of 6 {{.*}} %V8i16 = sdiv - %V8i16 = sdiv <8 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> + %V8i16 = sdiv nof <8 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> ; SSE: cost of 12 {{.*}} %V16i16 = sdiv ; AVX1: cost of 14 {{.*}} %V16i16 = sdiv ; AVX2: cost of 6 {{.*}} %V16i16 = sdiv ; AVX512: cost of 6 {{.*}} %V16i16 = 
sdiv - %V16i16 = sdiv <16 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> + %V16i16 = sdiv nof <16 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> ; SSE: cost of 24 {{.*}} %V32i16 = sdiv ; AVX1: cost of 28 {{.*}} %V32i16 = sdiv ; AVX2: cost of 12 {{.*}} %V32i16 = sdiv ; AVX512F: cost of 12 {{.*}} %V32i16 = sdiv ; AVX512BW: cost of 6 {{.*}} %V32i16 = sdiv - %V32i16 = sdiv <32 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> + %V32i16 = sdiv nof <32 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> ; CHECK: cost of 1 {{.*}} %I8 = sdiv - %I8 = sdiv i8 undef, 16 + %I8 = sdiv nof i8 undef, 16 ; SSE: cost of 320 {{.*}} %V16i8 = sdiv ; AVX: cost of 320 {{.*}} %V16i8 = sdiv - %V16i8 = sdiv <16 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> + %V16i8 = sdiv nof <16 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> ; SSE: cost of 640 {{.*}} %V32i8 = sdiv ; AVX: cost of 640 {{.*}} %V32i8 = sdiv - %V32i8 = sdiv <32 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> + %V32i8 = sdiv nof <32 x i8> undef, <i8 16, i8 16, i8 
16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> ; SSE: cost of 1280 {{.*}} %V64i8 = sdiv ; AVX: cost of 1280 {{.*}} %V64i8 = sdiv - %V64i8 = sdiv <64 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> + %V64i8 = sdiv nof <64 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> ret i32 undef } @@ -316,61 +316,61 @@ ; CHECK-LABEL: 'udiv_uniformconstpow2' define i32 @udiv_uniformconstpow2() { ; CHECK: cost of 1 {{.*}} %I64 = udiv - %I64 = udiv i64 undef, 16 + %I64 = udiv nof i64 undef, 16 ; SSE: cost of 40 {{.*}} %V2i64 = udiv ; AVX: cost of 40 {{.*}} %V2i64 = udiv - %V2i64 = udiv <2 x i64> undef, <i64 16, i64 16> + %V2i64 = udiv nof <2 x i64> undef, <i64 16, i64 16> ; SSE: cost of 80 {{.*}} %V4i64 = udiv ; AVX: cost of 80 {{.*}} %V4i64 = udiv - %V4i64 = udiv <4 x i64> undef, <i64 16, i64 16, i64 16, i64 16> + %V4i64 = udiv nof <4 x i64> undef, <i64 16, i64 16, i64 16, i64 16> ; SSE: cost of 160 {{.*}} %V8i64 = udiv ; AVX: cost of 160 {{.*}} %V8i64 = udiv - %V8i64 = udiv <8 x i64> undef, <i64 16, i64 16, i64 
16, i64 16, i64 16, i64 16, i64 16, i64 16> + %V8i64 = udiv nof <8 x i64> undef, <i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16> ; CHECK: cost of 1 {{.*}} %I32 = udiv - %I32 = udiv i32 undef, 16 + %I32 = udiv nof i32 undef, 16 ; SSE: cost of 15 {{.*}} %V4i32 = udiv ; AVX: cost of 15 {{.*}} %V4i32 = udiv - %V4i32 = udiv <4 x i32> undef, <i32 16, i32 16, i32 16, i32 16> + %V4i32 = udiv nof <4 x i32> undef, <i32 16, i32 16, i32 16, i32 16> ; SSE: cost of 30 {{.*}} %V8i32 = udiv ; AVX1: cost of 32 {{.*}} %V8i32 = udiv ; AVX2: cost of 15 {{.*}} %V8i32 = udiv ; AVX512: cost of 15 {{.*}} %V8i32 = udiv - %V8i32 = udiv <8 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> + %V8i32 = udiv nof <8 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> ; SSE: cost of 60 {{.*}} %V16i32 = udiv ; AVX1: cost of 64 {{.*}} %V16i32 = udiv ; AVX2: cost of 30 {{.*}} %V16i32 = udiv ; AVX512: cost of 15 {{.*}} %V16i32 = udiv - %V16i32 = udiv <16 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> + %V16i32 = udiv nof <16 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16> ; CHECK: cost of 1 {{.*}} %I16 = udiv - %I16 = udiv i16 undef, 16 + %I16 = udiv nof i16 undef, 16 ; SSE: cost of 6 {{.*}} %V8i16 = udiv ; AVX: cost of 6 {{.*}} %V8i16 = udiv - %V8i16 = udiv <8 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> + %V8i16 = udiv nof <8 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> ; SSE: cost of 12 {{.*}} %V16i16 = udiv ; AVX1: cost of 14 {{.*}} %V16i16 = udiv ; AVX2: cost of 6 {{.*}} %V16i16 = udiv ; AVX512: cost of 6 {{.*}} %V16i16 = udiv - %V16i16 = udiv <16 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 
16, i16 16, i16 16> + %V16i16 = udiv nof <16 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> ; SSE: cost of 24 {{.*}} %V32i16 = udiv ; AVX1: cost of 28 {{.*}} %V32i16 = udiv ; AVX2: cost of 12 {{.*}} %V32i16 = udiv ; AVX512F: cost of 12 {{.*}} %V32i16 = udiv ; AVX512BW: cost of 6 {{.*}} %V32i16 = udiv - %V32i16 = udiv <32 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> + %V32i16 = udiv nof <32 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16> ; CHECK: cost of 1 {{.*}} %I8 = udiv - %I8 = udiv i8 undef, 16 + %I8 = udiv nof i8 undef, 16 ; SSE: cost of 320 {{.*}} %V16i8 = udiv ; AVX: cost of 320 {{.*}} %V16i8 = udiv - %V16i8 = udiv <16 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> + %V16i8 = udiv nof <16 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> ; SSE: cost of 640 {{.*}} %V32i8 = udiv ; AVX: cost of 640 {{.*}} %V32i8 = udiv - %V32i8 = udiv <32 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> + %V32i8 = udiv nof <32 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, 
i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> ; SSE: cost of 1280 {{.*}} %V64i8 = udiv ; AVX: cost of 1280 {{.*}} %V64i8 = udiv - %V64i8 = udiv <64 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> + %V64i8 = udiv nof <64 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16> ret i32 undef } Index: test/Analysis/CostModel/X86/vdiv-cost.ll =================================================================== --- test/Analysis/CostModel/X86/vdiv-cost.ll +++ test/Analysis/CostModel/X86/vdiv-cost.ll @@ -8,7 +8,7 @@ ; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512 --check-prefix=AVX512DQ define <4 x i32> @test1(<4 x i32> %a) { - %div = udiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7> + %div = udiv nof <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7> ret <4 x i32> %div ; CHECK: 'Cost Model Analysis' for function 'test1': @@ -18,7 +18,7 @@ } define <8 x i32> @test2(<8 x i32> %a) { - %div = udiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7> + %div = udiv nof <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7> ret <8 x i32> 
%div ; CHECK: 'Cost Model Analysis' for function 'test2': @@ -29,7 +29,7 @@ } define <8 x i16> @test3(<8 x i16> %a) { - %div = udiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %div = udiv nof <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ret <8 x i16> %div ; CHECK: 'Cost Model Analysis' for function 'test3': @@ -39,7 +39,7 @@ } define <16 x i16> @test4(<16 x i16> %a) { - %div = udiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7> + %div = udiv nof <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7> ret <16 x i16> %div ; CHECK: 'Cost Model Analysis' for function 'test4': @@ -50,7 +50,7 @@ } define <8 x i16> @test5(<8 x i16> %a) { - %div = sdiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %div = sdiv nof <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ret <8 x i16> %div ; CHECK: 'Cost Model Analysis' for function 'test5': @@ -60,7 +60,7 @@ } define <16 x i16> @test6(<16 x i16> %a) { - %div = sdiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7> + %div = sdiv nof <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7,i16 7, i16 7, i16 7, i16 7> ret <16 x i16> %div ; CHECK: 'Cost Model Analysis' for function 'test6': @@ -71,7 +71,7 @@ } define <16 x i8> @test7(<16 x i8> %a) { - %div = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> + %div = sdiv nof <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> ret <16 x i8> %div ; CHECK: 'Cost Model Analysis' for function 'test7': @@ -81,7 +81,7 @@ } define <4 x i32> @test8(<4 x i32> %a) { - %div = sdiv <4 x i32> %a, <i32 7, i32 7, i32 7, 
i32 7> + %div = sdiv nof <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7> ret <4 x i32> %div ; CHECK: 'Cost Model Analysis' for function 'test8': @@ -91,7 +91,7 @@ } define <8 x i32> @test9(<8 x i32> %a) { - %div = sdiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7> + %div = sdiv nof <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7> ret <8 x i32> %div ; CHECK: 'Cost Model Analysis' for function 'test9': @@ -102,7 +102,7 @@ } define <8 x i32> @test10(<8 x i32> %a) { - %div = sdiv <8 x i32> %a, <i32 8, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7> + %div = sdiv nof <8 x i32> %a, <i32 8, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7> ret <8 x i32> %div ; CHECK: 'Cost Model Analysis' for function 'test10': @@ -112,7 +112,7 @@ } define <16 x i32> @test11(<16 x i32> %a) { - %div = sdiv <16 x i32> %a, <i32 8, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7, i32 8, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7> + %div = sdiv nof <16 x i32> %a, <i32 8, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7, i32 8, i32 7, i32 7, i32 7,i32 7, i32 7, i32 7, i32 7> ret <16 x i32> %div ; CHECK: 'Cost Model Analysis' for function 'test11': Index: test/Analysis/DependenceAnalysis/BasePtrBug.ll =================================================================== --- test/Analysis/DependenceAnalysis/BasePtrBug.ll +++ test/Analysis/DependenceAnalysis/BasePtrBug.ll @@ -20,7 +20,7 @@ br i1 %cmp9, label %for.body.lr.ph, label %for.end for.body.lr.ph: - %div = sdiv i32 %N, 2 + %div = sdiv nof i32 %N, 2 %bot.gep = getelementptr i32, i32* %A, i32 %div br label %for.body Index: test/Analysis/DivergenceAnalysis/NVPTX/diverge.ll =================================================================== --- test/Analysis/DivergenceAnalysis/NVPTX/diverge.ll +++ test/Analysis/DivergenceAnalysis/NVPTX/diverge.ll @@ -172,7 +172,7 @@ %i3 = phi i32 [ %j1, %loop_entry_1 ], [ %j2, %loop_entry_2 ] br label %loop_latch loop_latch: - %div = sdiv i32 %tid, %i3 + %div = sdiv nof i32 
%tid, %i3 switch i32 %div, label %branch [ i32 1, label %loop_entry_1 i32 2, label %loop_entry_2 ] branch: Index: test/Analysis/Lint/check-zero-divide.ll =================================================================== --- test/Analysis/Lint/check-zero-divide.ll +++ test/Analysis/Lint/check-zero-divide.ll @@ -1,7 +1,7 @@ ; RUN: opt -lint -disable-output %s 2>&1 | FileCheck %s define <2 x i32> @use_vector_sdiv(<2 x i32> %a) nounwind { - %b = sdiv <2 x i32> %a, <i32 5, i32 8> + %b = sdiv nof <2 x i32> %a, <i32 5, i32 8> ret <2 x i32> %b } @@ -11,7 +11,7 @@ } define <2 x i32> @use_vector_udiv(<2 x i32> %a) nounwind { - %b = udiv <2 x i32> %a, <i32 5, i32 8> + %b = udiv nof <2 x i32> %a, <i32 5, i32 8> ret <2 x i32> %b } @@ -22,57 +22,57 @@ define i32 @use_sdiv_by_zero(i32 %a) nounwind { ; CHECK: Undefined behavior: Division by zero -; CHECK-NEXT: %b = sdiv i32 %a, 0 - %b = sdiv i32 %a, 0 +; CHECK-NEXT: %b = sdiv nof i32 %a, 0 + %b = sdiv nof i32 %a, 0 ret i32 %b } define i32 @use_sdiv_by_zeroinitializer(i32 %a) nounwind { ; CHECK: Undefined behavior: Division by zero -; CHECK-NEXT: %b = sdiv i32 %a, 0 - %b = sdiv i32 %a, zeroinitializer +; CHECK-NEXT: %b = sdiv nof i32 %a, 0 + %b = sdiv nof i32 %a, zeroinitializer ret i32 %b } define <2 x i32> @use_vector_sdiv_by_zero_x(<2 x i32> %a) nounwind { ; CHECK: Undefined behavior: Division by zero -; CHECK-NEXT: %b = sdiv <2 x i32> %a, <i32 0, i32 5> - %b = sdiv <2 x i32> %a, <i32 0, i32 5> +; CHECK-NEXT: %b = sdiv nof <2 x i32> %a, <i32 0, i32 5> + %b = sdiv nof <2 x i32> %a, <i32 0, i32 5> ret <2 x i32> %b } define <2 x i32> @use_vector_sdiv_by_zero_y(<2 x i32> %a) nounwind { ; CHECK: Undefined behavior: Division by zero -; CHECK-NEXT: %b = sdiv <2 x i32> %a, <i32 4, i32 0> - %b = sdiv <2 x i32> %a, <i32 4, i32 0> +; CHECK-NEXT: %b = sdiv nof <2 x i32> %a, <i32 4, i32 0> + %b = sdiv nof <2 x i32> %a, <i32 4, i32 0> ret <2 x i32> %b } define <2 x i32> @use_vector_sdiv_by_zero_xy(<2 x i32> %a) nounwind { ; CHECK: Undefined 
behavior: Division by zero -; CHECK-NEXT: %b = sdiv <2 x i32> %a, zeroinitializer - %b = sdiv <2 x i32> %a, <i32 0, i32 0> +; CHECK-NEXT: %b = sdiv nof <2 x i32> %a, zeroinitializer + %b = sdiv nof <2 x i32> %a, <i32 0, i32 0> ret <2 x i32> %b } define <2 x i32> @use_vector_sdiv_by_undef_x(<2 x i32> %a) nounwind { ; CHECK: Undefined behavior: Division by zero -; CHECK-NEXT: %b = sdiv <2 x i32> %a, <i32 undef, i32 5> - %b = sdiv <2 x i32> %a, <i32 undef, i32 5> +; CHECK-NEXT: %b = sdiv nof <2 x i32> %a, <i32 undef, i32 5> + %b = sdiv nof <2 x i32> %a, <i32 undef, i32 5> ret <2 x i32> %b } define <2 x i32> @use_vector_sdiv_by_undef_y(<2 x i32> %a) nounwind { ; CHECK: Undefined behavior: Division by zero -; CHECK-NEXT: %b = sdiv <2 x i32> %a, <i32 5, i32 undef> - %b = sdiv <2 x i32> %a, <i32 5, i32 undef> +; CHECK-NEXT: %b = sdiv nof <2 x i32> %a, <i32 5, i32 undef> + %b = sdiv nof <2 x i32> %a, <i32 5, i32 undef> ret <2 x i32> %b } define <2 x i32> @use_vector_sdiv_by_undef_xy(<2 x i32> %a) nounwind { ; CHECK: Undefined behavior: Division by zero -; CHECK-NEXT: %b = sdiv <2 x i32> %a, undef - %b = sdiv <2 x i32> %a, <i32 undef, i32 undef> +; CHECK-NEXT: %b = sdiv nof <2 x i32> %a, undef + %b = sdiv nof <2 x i32> %a, <i32 undef, i32 undef> ret <2 x i32> %b } Index: test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll =================================================================== --- test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll +++ test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll @@ -18,7 +18,7 @@ %i.0.i6 = phi i32 [ %8, %bb4.i ], [ 0, %entry ] ; <i32> [#uses=2] %5 = sub i32 %4, %i.0.i6 ; <i32> [#uses=1] %6 = sext i32 %5 to i64 ; <i64> [#uses=1] - %7 = udiv i64 undef, %6 ; <i64> [#uses=1] + %7 = udiv nof i64 undef, %6 ; <i64> [#uses=1] %8 = add i32 %i.0.i6, 1 ; <i32> [#uses=2] %phitmp = icmp eq i64 %7, 0 ; <i1> [#uses=1] %.not.i = icmp sge i32 %8, %4 ; <i1> [#uses=1] Index: test/Analysis/ScalarEvolution/avoid-smax-1.ll 
=================================================================== --- test/Analysis/ScalarEvolution/avoid-smax-1.ll +++ test/Analysis/ScalarEvolution/avoid-smax-1.ll @@ -15,7 +15,7 @@ entry: %0 = mul i32 %x, %w ; <i32> [#uses=2] %1 = mul i32 %x, %w ; <i32> [#uses=1] - %2 = sdiv i32 %1, 4 ; <i32> [#uses=1] + %2 = sdiv nof i32 %1, 4 ; <i32> [#uses=1] %.sum2 = add i32 %2, %0 ; <i32> [#uses=2] %cond = icmp eq i32 %d, 1 ; <i1> [#uses=1] br i1 %cond, label %bb29, label %bb10.preheader @@ -75,9 +75,9 @@ br i1 true, label %bb.nph7, label %bb9 bb.nph5: ; preds = %bb18.loopexit - %18 = sdiv i32 %w, 2 ; <i32> [#uses=1] + %18 = sdiv nof i32 %w, 2 ; <i32> [#uses=1] %19 = icmp slt i32 %w, 2 ; <i1> [#uses=1] - %20 = sdiv i32 %x, 2 ; <i32> [#uses=1] + %20 = sdiv nof i32 %x, 2 ; <i32> [#uses=1] br i1 %19, label %bb18.bb20_crit_edge.split, label %bb.nph5.split bb.nph5.split: ; preds = %bb.nph5 @@ -95,7 +95,7 @@ bb.nph3: ; preds = %bb13 %26 = add i32 %21, %0 ; <i32> [#uses=1] %27 = add i32 %21, %.sum2 ; <i32> [#uses=1] - %28 = sdiv i32 %w, 2 ; <i32> [#uses=1] + %28 = sdiv nof i32 %w, 2 ; <i32> [#uses=1] br label %bb14 bb14: ; preds = %bb15, %bb.nph3 @@ -151,7 +151,7 @@ bb22: ; preds = %bb20 %45 = mul i32 %x, %w ; <i32> [#uses=1] - %46 = sdiv i32 %45, 4 ; <i32> [#uses=1] + %46 = sdiv nof i32 %45, 4 ; <i32> [#uses=1] %.sum3 = add i32 %46, %.sum2 ; <i32> [#uses=2] %47 = add i32 %x, 15 ; <i32> [#uses=1] %48 = and i32 %47, -16 ; <i32> [#uses=1] @@ -188,7 +188,7 @@ %.sum4 = add i32 %.sum3, %59 ; <i32> [#uses=1] %60 = getelementptr i8, i8* %j, i32 %.sum4 ; <i8*> [#uses=1] %61 = mul i32 %x, %w ; <i32> [#uses=1] - %62 = sdiv i32 %61, 2 ; <i32> [#uses=1] + %62 = sdiv nof i32 %61, 2 ; <i32> [#uses=1] tail call void @llvm.memset.p0i8.i32(i8* %60, i8 -128, i32 %62, i32 1, i1 false) ret void @@ -222,7 +222,7 @@ %72 = mul i32 %x, %w ; <i32> [#uses=1] %73 = getelementptr i8, i8* %j, i32 %72 ; <i8*> [#uses=1] %74 = mul i32 %x, %w ; <i32> [#uses=1] - %75 = sdiv i32 %74, 2 ; <i32> [#uses=1] + %75 = 
sdiv nof i32 %74, 2 ; <i32> [#uses=1] tail call void @llvm.memset.p0i8.i32(i8* %73, i8 -128, i32 %75, i32 1, i1 false) ret void Index: test/Analysis/ScalarEvolution/flags-from-poison.ll =================================================================== --- test/Analysis/ScalarEvolution/flags-from-poison.ll +++ test/Analysis/ScalarEvolution/flags-from-poison.ll @@ -371,7 +371,7 @@ ; CHECK: --> {%offset,+,1}<nsw> %j = add nsw i32 %i, %offset - %q = sdiv i32 %numIterations, %j + %q = sdiv nof i32 %numIterations, %j %nexti = add nsw i32 %i, 1 %exitcond = icmp eq i32 %nexti, %numIterations br i1 %exitcond, label %exit, label %loop @@ -391,7 +391,7 @@ ; CHECK: --> {%offset,+,1}<nw> %j = add nsw i32 %i, %offset - %q = sdiv i32 %j, %numIterations + %q = sdiv nof i32 %j, %numIterations %nexti = add nsw i32 %i, 1 %exitcond = icmp eq i32 %nexti, %numIterations br i1 %exitcond, label %exit, label %loop @@ -707,7 +707,7 @@ ; CHECK: %v = ; CHECK-NEXT: --> {{[{][{]}}-1,+,-1}<nw><%outer>,+,1}<nsw><%inner> %v = sub nsw i32 %i_idx, %o_idx.inc - %forub = udiv i32 1, %v + %forub = udiv nof i32 1, %v %cond2 = icmp eq i32 %i_idx, %inner_l br i1 %cond2, label %outer.be, label %inner Index: test/Analysis/ScalarEvolution/flattened-0.ll =================================================================== --- test/Analysis/ScalarEvolution/flattened-0.ll +++ test/Analysis/ScalarEvolution/flattened-0.ll @@ -7,7 +7,7 @@ bb: %idx = phi i64 [ 0, %entry ], [ %idx.incr, %bb ] - %i = udiv i64 %idx, 7 + %i = udiv nof i64 %idx, 7 %j = urem i64 %idx, 7 %a.ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 %i, i64 %j ; CHECK: %a.ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 %i, i64 %j Index: test/Analysis/ScalarEvolution/implied-via-division.ll =================================================================== --- test/Analysis/ScalarEvolution/implied-via-division.ll +++ test/Analysis/ScalarEvolution/implied-via-division.ll @@ -8,7 +8,7 @@ ; CHECK: Loop %header: backedge-taken count is (-1 + 
%n.div.2)<nsw> entry: %cmp1 = icmp sgt i32 %n, 1 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -28,7 +28,7 @@ ; CHECK: Loop %header: backedge-taken count is (-1 + (1 smax %n.div.2))<nsw> entry: %cmp1 = icmp sgt i32 %n, 0 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -48,7 +48,7 @@ ; CHECK: Loop %header: backedge-taken count is (-1 + %n.div.2)<nsw> entry: %cmp1 = icmp sge i32 %n, 2 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -68,7 +68,7 @@ ; CHECK: Loop %header: backedge-taken count is (-1 + (1 smax %n.div.2))<nsw> entry: %cmp1 = icmp sge i32 %n, 1 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -88,7 +88,7 @@ ; CHECK: Loop %header: backedge-taken count is (1 + %n.div.2)<nsw> entry: %cmp1 = icmp sgt i32 %n, -2 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -108,7 +108,7 @@ ; CHECK: Loop %header: backedge-taken count is (0 smax (1 + %n.div.2)<nsw>) entry: %cmp1 = icmp sgt i32 %n, -3 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -128,7 +128,7 @@ ; CHECK: Loop %header: backedge-taken count is (1 + %n.div.2)<nsw> entry: %cmp1 = icmp sge i32 %n, -1 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 call void(i1, ...) 
@llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -148,7 +148,7 @@ ; CHECK: Loop %header: backedge-taken count is (0 smax (1 + %n.div.2)<nsw>) entry: %cmp1 = icmp sge i32 %n, -2 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -168,7 +168,7 @@ ; CHECK: Loop %header: backedge-taken count is (-1 + (sext i32 %n.div.2 to i64))<nsw> entry: %cmp1 = icmp sgt i32 %n, 1 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 %n.div.2.ext = sext i32 %n.div.2 to i64 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -189,7 +189,7 @@ ; CHECK: Loop %header: backedge-taken count is (-1 + (1 smax (sext i32 %n.div.2 to i64)))<nsw> entry: %cmp1 = icmp sgt i32 %n, 0 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 %n.div.2.ext = sext i32 %n.div.2 to i64 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -210,7 +210,7 @@ ; CHECK: Loop %header: backedge-taken count is (-1 + (sext i32 %n.div.2 to i64))<nsw> entry: %cmp1 = icmp sge i32 %n, 2 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 %n.div.2.ext = sext i32 %n.div.2 to i64 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -231,7 +231,7 @@ ; CHECK: Loop %header: backedge-taken count is (-1 + (1 smax (sext i32 %n.div.2 to i64)))<nsw> entry: %cmp1 = icmp sge i32 %n, 1 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 %n.div.2.ext = sext i32 %n.div.2 to i64 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -252,7 +252,7 @@ ; CHECK: Loop %header: backedge-taken count is (1 + (sext i32 %n.div.2 to i64))<nsw> entry: %cmp1 = icmp sgt i32 %n, -2 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 %n.div.2.ext = sext i32 %n.div.2 to i64 call void(i1, ...) 
@llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -273,7 +273,7 @@ ; CHECK: Loop %header: backedge-taken count is (0 smax (1 + (sext i32 %n.div.2 to i64))<nsw>) entry: %cmp1 = icmp sgt i32 %n, -3 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 %n.div.2.ext = sext i32 %n.div.2 to i64 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -294,7 +294,7 @@ ; CHECK: Loop %header: backedge-taken count is (1 + (sext i32 %n.div.2 to i64))<nsw> entry: %cmp1 = icmp sge i32 %n, -1 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 %n.div.2.ext = sext i32 %n.div.2 to i64 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header @@ -315,7 +315,7 @@ ; CHECK: Loop %header: backedge-taken count is (0 smax (1 + (sext i32 %n.div.2 to i64))<nsw>) entry: %cmp1 = icmp sge i32 %n, -2 - %n.div.2 = sdiv i32 %n, 2 + %n.div.2 = sdiv nof i32 %n, 2 %n.div.2.ext = sext i32 %n.div.2 to i64 call void(i1, ...) @llvm.experimental.guard(i1 %cmp1) [ "deopt"() ] br label %header Index: test/Analysis/ScalarEvolution/undefined.ll =================================================================== --- test/Analysis/ScalarEvolution/undefined.ll +++ test/Analysis/ScalarEvolution/undefined.ll @@ -5,7 +5,7 @@ define void @foo(i64 %x) { - %a = udiv i64 %x, 0 + %a = udiv nof i64 %x, 0 ; CHECK: --> (%x /u 0) %B = shl i64 %x, 64 Index: test/Analysis/ValueTracking/known-power-of-two.ll =================================================================== --- test/Analysis/ValueTracking/known-power-of-two.ll +++ test/Analysis/ValueTracking/known-power-of-two.ll @@ -3,7 +3,7 @@ ; https://llvm.org/bugs/show_bug.cgi?id=25900 ; An arithmetic shift right of a power of two is not a power ; of two if the original value is the sign bit. Therefore, -; we can't transform the sdiv into a udiv. +; we can't transform the sdiv nof into a udiv. 
define i32 @pr25900(i32 %d) { %and = and i32 %d, -2147483648 @@ -12,7 +12,7 @@ %or = or i64 %ext, 4294967296 %trunc = trunc i64 %or to i32 %ashr = ashr exact i32 %trunc, 31 - %div = sdiv i32 4, %ashr + %div = sdiv nof i32 4, %ashr ret i32 %div ; CHECK: sdiv Index: test/Assembler/ConstantExprFold.ll =================================================================== --- test/Assembler/ConstantExprFold.ll +++ test/Assembler/ConstantExprFold.ll @@ -9,7 +9,7 @@ @0 = global i64* inttoptr (i64 add (i64 ptrtoint (i64* @A to i64), i64 0) to i64*) ; X + 0 == X @1 = global i64* inttoptr (i64 sub (i64 ptrtoint (i64* @A to i64), i64 0) to i64*) ; X - 0 == X @2 = global i64* inttoptr (i64 mul (i64 ptrtoint (i64* @A to i64), i64 0) to i64*) ; X * 0 == 0 -@3 = global i64* inttoptr (i64 sdiv (i64 ptrtoint (i64* @A to i64), i64 1) to i64*) ; X / 1 == X +@3 = global i64* inttoptr (i64 sdiv nof (i64 ptrtoint (i64* @A to i64), i64 1) to i64*) ; X / 1 == X @4 = global i64* inttoptr (i64 srem (i64 ptrtoint (i64* @A to i64), i64 1) to i64*) ; X % 1 == 0 @5 = global i64* inttoptr (i64 and (i64 ptrtoint (i64* @A to i64), i64 0) to i64*) ; X & 0 == 0 @6 = global i64* inttoptr (i64 and (i64 ptrtoint (i64* @A to i64), i64 -1) to i64*) ; X & -1 == X Index: test/Assembler/flags.ll =================================================================== --- test/Assembler/flags.ll +++ test/Assembler/flags.ll @@ -100,26 +100,26 @@ } define i64 @sdiv_exact(i64 %x, i64 %y) { -; CHECK: %z = sdiv exact i64 %x, %y - %z = sdiv exact i64 %x, %y +; CHECK: %z = sdiv exact nof i64 %x, %y + %z = sdiv exact nof i64 %x, %y ret i64 %z } define i64 @sdiv_plain(i64 %x, i64 %y) { -; CHECK: %z = sdiv i64 %x, %y - %z = sdiv i64 %x, %y +; CHECK: %z = sdiv nof i64 %x, %y + %z = sdiv nof i64 %x, %y ret i64 %z } define i64 @udiv_exact(i64 %x, i64 %y) { -; CHECK: %z = udiv exact i64 %x, %y - %z = udiv exact i64 %x, %y +; CHECK: %z = udiv exact nof i64 %x, %y + %z = udiv exact nof i64 %x, %y ret i64 %z } define i64 
@udiv_plain(i64 %x, i64 %y) { -; CHECK: %z = udiv i64 %x, %y - %z = udiv i64 %x, %y +; CHECK: %z = udiv nof i64 %x, %y + %z = udiv nof i64 %x, %y ret i64 %z } @@ -175,13 +175,13 @@ } define i64 @sdiv_exact_ce() { -; CHECK: ret i64 sdiv exact (i64 ptrtoint (i64* @addr to i64), i64 91) - ret i64 sdiv exact (i64 ptrtoint (i64* @addr to i64), i64 91) +; CHECK: ret i64 sdiv exact nof (i64 ptrtoint (i64* @addr to i64), i64 91) + ret i64 sdiv exact nof (i64 ptrtoint (i64* @addr to i64), i64 91) } define i64 @udiv_exact_ce() { -; CHECK: ret i64 udiv exact (i64 ptrtoint (i64* @addr to i64), i64 91) - ret i64 udiv exact (i64 ptrtoint (i64* @addr to i64), i64 91) +; CHECK: ret i64 udiv exact nof (i64 ptrtoint (i64* @addr to i64), i64 91) + ret i64 udiv exact nof (i64 ptrtoint (i64* @addr to i64), i64 91) } define i64 @ashr_exact_ce() { @@ -215,8 +215,8 @@ } define i64 @sdiv_plain_ce() { -; CHECK: ret i64 sdiv (i64 ptrtoint (i64* @addr to i64), i64 91) - ret i64 sdiv (i64 ptrtoint (i64* @addr to i64), i64 91) +; CHECK: ret i64 sdiv nof (i64 ptrtoint (i64* @addr to i64), i64 91) + ret i64 sdiv nof (i64 ptrtoint (i64* @addr to i64), i64 91) } define i64* @gep_plain_ce() { Index: test/Bindings/llvm-c/echo.ll =================================================================== --- test/Bindings/llvm-c/echo.ll +++ test/Bindings/llvm-c/echo.ll @@ -49,8 +49,8 @@ %1 = add i32 %a, %b %2 = mul i32 %a, %1 %3 = sub i32 %2, %1 - %4 = udiv i32 %3, %b - %5 = sdiv i32 %2, %4 + %4 = udiv nof i32 %3, %b + %5 = sdiv nof i32 %2, %4 %6 = urem i32 %3, %5 %7 = srem i32 %2, %6 %8 = shl i32 %1, %b Index: test/Bitcode/compatibility.ll =================================================================== --- test/Bitcode/compatibility.ll +++ test/Bitcode/compatibility.ll @@ -1037,6 +1037,16 @@ sdiv exact i8 %op1, %op2 ; CHECK: sdiv exact i8 %op1, %op2 + ; nof + udiv nof i8 %op1, %op2 + ; CHECK: udiv nof i8 %op1, %op2 + udiv exact nof i8 %op1, %op2 + ; CHECK: udiv exact nof i8 %op1, %op2 + sdiv nof i8 %op1, 
%op2 + ; CHECK: sdiv nof i8 %op1, %op2 + sdiv exact nof i8 %op1, %op2 + ; CHECK: sdiv exact nof i8 %op1, %op2 + ; none urem i8 %op1, %op2 ; CHECK: urem i8 %op1, %op2 Index: test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll =================================================================== --- test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll +++ test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll @@ -662,7 +662,7 @@ ; CHECK-NEXT: %w0 = COPY [[RES]] ; CHECK-NEXT: RET_ReallyLR implicit %w0 define i32 @test_sdiv(i32 %arg1, i32 %arg2) { - %res = sdiv i32 %arg1, %arg2 + %res = sdiv nof i32 %arg1, %arg2 ret i32 %res } @@ -673,7 +673,7 @@ ; CHECK-NEXT: %w0 = COPY [[RES]] ; CHECK-NEXT: RET_ReallyLR implicit %w0 define i32 @test_udiv(i32 %arg1, i32 %arg2) { - %res = udiv i32 %arg1, %arg2 + %res = udiv nof i32 %arg1, %arg2 ret i32 %res } Index: test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll =================================================================== --- test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll +++ test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll @@ -195,7 +195,7 @@ %mul = mul nsw i64 %conv1, %conv %0 = load i32, i32* %d, align 4 %conv2 = sext i32 %0 to i64 - %div = sdiv i64 %mul, %conv2 + %div = sdiv nof i64 %mul, %conv2 ret i64 %div } ; CHECK-LABEL: f_load_smull: @@ -214,7 +214,7 @@ %sub = mul i64 %conv1, %mul %0 = load i32, i32* %d, align 4 %conv2 = sext i32 %0 to i64 - %div = sdiv i64 %sub, %conv2 + %div = sdiv nof i64 %sub, %conv2 ret i64 %div } ; CHECK-LABEL: f_load_smnegl_64: @@ -270,7 +270,7 @@ %mul = mul i64 %conv1, %conv %0 = load i32, i32* %d, align 4 %conv2 = zext i32 %0 to i64 - %div = udiv i64 %mul, %conv2 + %div = udiv nof i64 %mul, %conv2 ret i64 %div } ; CHECK-LABEL: f_load_umull: @@ -289,7 +289,7 @@ %sub = mul i64 %conv1, %mul %0 = load i32, i32* %d, align 4 %conv2 = zext i32 %0 to i64 - %div = udiv i64 %sub, %conv2 + %div = udiv nof i64 %sub, %conv2 ret i64 %div } ; CHECK-LABEL: f_load_umnegl_64: Index: 
test/CodeGen/AArch64/analyzecmp.ll =================================================================== --- test/CodeGen/AArch64/analyzecmp.ll +++ test/CodeGen/AArch64/analyzecmp.ll @@ -11,7 +11,7 @@ entry: %conv = and i64 %a, 4294967295 %add = add nsw i64 %conv, -1 - %div = sdiv i64 %add, 64 + %div = sdiv nof i64 %add, 64 %rem = srem i64 %add, 64 %cmp = icmp slt i64 %rem, 0 br i1 %cmp, label %if.then, label %exit Index: test/CodeGen/AArch64/arm64-arith.ll =================================================================== --- test/CodeGen/AArch64/arm64-arith.ll +++ test/CodeGen/AArch64/arm64-arith.ll @@ -14,7 +14,7 @@ ; CHECK-LABEL: t2: ; CHECK: udiv w0, w0, w1 ; CHECK: ret - %udiv = udiv i32 %a, %b + %udiv = udiv nof i32 %a, %b ret i32 %udiv } @@ -23,7 +23,7 @@ ; CHECK-LABEL: t3: ; CHECK: udiv x0, x0, x1 ; CHECK: ret - %udiv = udiv i64 %a, %b + %udiv = udiv nof i64 %a, %b ret i64 %udiv } @@ -32,7 +32,7 @@ ; CHECK-LABEL: t4: ; CHECK: sdiv w0, w0, w1 ; CHECK: ret - %sdiv = sdiv i32 %a, %b + %sdiv = sdiv nof i32 %a, %b ret i32 %sdiv } @@ -41,7 +41,7 @@ ; CHECK-LABEL: t5: ; CHECK: sdiv x0, x0, x1 ; CHECK: ret - %sdiv = sdiv i64 %a, %b + %sdiv = sdiv nof i64 %a, %b ret i64 %sdiv } Index: test/CodeGen/AArch64/arm64-ccmp.ll =================================================================== --- test/CodeGen/AArch64/arm64-ccmp.ll +++ test/CodeGen/AArch64/arm64-ccmp.ll @@ -118,7 +118,7 @@ br i1 %cmp, label %land.lhs.true, label %if.end land.lhs.true: - %div = sdiv i32 %b, %a + %div = sdiv nof i32 %b, %a %cmp1 = icmp slt i32 %div, 17 br i1 %cmp1, label %if.then, label %if.end @@ -168,7 +168,7 @@ br i1 %cmp, label %land.lhs.true, label %if.end land.lhs.true: - %div = sdiv i32 %b, %a + %div = sdiv nof i32 %b, %a %cmp1 = icmp eq i32 %div, 5 %cmp4 = icmp sgt i32 %div, %c %or.cond = and i1 %cmp1, %cmp4 Index: test/CodeGen/AArch64/arm64-fast-isel.ll =================================================================== --- test/CodeGen/AArch64/arm64-fast-isel.ll +++ 
test/CodeGen/AArch64/arm64-fast-isel.ll @@ -107,7 +107,7 @@ ; CHECK: mul x{{[0-9]+}}, [[ARG1:x[0-9]+]], [[ARG2:x[0-9]+]] ; CHECK-NEXT: umulh x{{[0-9]+}}, [[ARG1]], [[ARG2]] entry: - %sub.ptr.div = sdiv exact i64 %arg, 8 + %sub.ptr.div = sdiv exact nof i64 %arg, 8 %tmp = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %sub.ptr.div, i64 8) %tmp1 = extractvalue { i64, i1 } %tmp, 0 ret i64 %tmp1 Index: test/CodeGen/AArch64/arm64-misched-basic-A57.ll =================================================================== --- test/CodeGen/AArch64/arm64-misched-basic-A57.ll +++ test/CodeGen/AArch64/arm64-misched-basic-A57.ll @@ -85,7 +85,7 @@ store i32 %add15, i32* %xx, align 4 - %div = sdiv i32 %4, %5 + %div = sdiv nof i32 %4, %5 store i32 %div, i32* %yy, align 4 Index: test/CodeGen/AArch64/arm64-neon-mul-div.ll =================================================================== --- test/CodeGen/AArch64/arm64-neon-mul-div.ll +++ test/CodeGen/AArch64/arm64-neon-mul-div.ll @@ -102,7 +102,7 @@ define <1 x i8> @sdiv1x8(<1 x i8> %A, <1 x i8> %B) { ; CHECK-LABEL: sdiv1x8: ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = sdiv <1 x i8> %A, %B; + %tmp3 = sdiv nof <1 x i8> %A, %B; ret <1 x i8> %tmp3 } @@ -116,7 +116,7 @@ ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = sdiv <8 x i8> %A, %B; + %tmp3 = sdiv nof <8 x i8> %A, %B; ret <8 x i8> %tmp3 } @@ -138,14 +138,14 @@ ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = sdiv <16 x i8> %A, %B; + %tmp3 = sdiv nof <16 x i8> %A, %B; ret <16 x i8> %tmp3 } define <1 x i16> @sdiv1x16(<1 x i16> %A, <1 x i16> %B) { ; CHECK-LABEL: sdiv1x16: ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = sdiv <1 x i16> %A, %B; + %tmp3 = sdiv nof <1 x i16> %A, %B; ret <1 x i16> %tmp3 } @@ -155,7 +155,7 @@ ; 
CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = sdiv <4 x i16> %A, %B; + %tmp3 = sdiv nof <4 x i16> %A, %B; ret <4 x i16> %tmp3 } @@ -169,14 +169,14 @@ ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = sdiv <8 x i16> %A, %B; + %tmp3 = sdiv nof <8 x i16> %A, %B; ret <8 x i16> %tmp3 } define <1 x i32> @sdiv1x32(<1 x i32> %A, <1 x i32> %B) { ; CHECK-LABEL: sdiv1x32: ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = sdiv <1 x i32> %A, %B; + %tmp3 = sdiv nof <1 x i32> %A, %B; ret <1 x i32> %tmp3 } @@ -184,7 +184,7 @@ ; CHECK-LABEL: sdiv2x32: ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = sdiv <2 x i32> %A, %B; + %tmp3 = sdiv nof <2 x i32> %A, %B; ret <2 x i32> %tmp3 } @@ -194,14 +194,14 @@ ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = sdiv <4 x i32> %A, %B; + %tmp3 = sdiv nof <4 x i32> %A, %B; ret <4 x i32> %tmp3 } define <1 x i64> @sdiv1x64(<1 x i64> %A, <1 x i64> %B) { ; CHECK-LABEL: sdiv1x64: ; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} - %tmp3 = sdiv <1 x i64> %A, %B; + %tmp3 = sdiv nof <1 x i64> %A, %B; ret <1 x i64> %tmp3 } @@ -209,14 +209,14 @@ ; CHECK-LABEL: sdiv2x64: ; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} ; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} - %tmp3 = sdiv <2 x i64> %A, %B; + %tmp3 = sdiv nof <2 x i64> %A, %B; ret <2 x i64> %tmp3 } define <1 x i8> @udiv1x8(<1 x i8> %A, <1 x i8> %B) { ; CHECK-LABEL: udiv1x8: ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = udiv <1 x i8> %A, %B; + %tmp3 = udiv nof <1 x i8> %A, %B; ret <1 x i8> %tmp3 } @@ -230,7 +230,7 @@ ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, 
{{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = udiv <8 x i8> %A, %B; + %tmp3 = udiv nof <8 x i8> %A, %B; ret <8 x i8> %tmp3 } @@ -252,14 +252,14 @@ ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = udiv <16 x i8> %A, %B; + %tmp3 = udiv nof <16 x i8> %A, %B; ret <16 x i8> %tmp3 } define <1 x i16> @udiv1x16(<1 x i16> %A, <1 x i16> %B) { ; CHECK-LABEL: udiv1x16: ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = udiv <1 x i16> %A, %B; + %tmp3 = udiv nof <1 x i16> %A, %B; ret <1 x i16> %tmp3 } @@ -269,7 +269,7 @@ ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = udiv <4 x i16> %A, %B; + %tmp3 = udiv nof <4 x i16> %A, %B; ret <4 x i16> %tmp3 } @@ -283,14 +283,14 @@ ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = udiv <8 x i16> %A, %B; + %tmp3 = udiv nof <8 x i16> %A, %B; ret <8 x i16> %tmp3 } define <1 x i32> @udiv1x32(<1 x i32> %A, <1 x i32> %B) { ; CHECK-LABEL: udiv1x32: ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = udiv <1 x i32> %A, %B; + %tmp3 = udiv nof <1 x i32> %A, %B; ret <1 x i32> %tmp3 } @@ -298,7 +298,7 @@ ; CHECK-LABEL: udiv2x32: ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = udiv <2 x i32> %A, %B; + %tmp3 = udiv nof <2 x i32> %A, %B; ret <2 x i32> %tmp3 } @@ -308,14 +308,14 @@ ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} - %tmp3 = udiv <4 x i32> %A, %B; + %tmp3 = udiv nof <4 x i32> %A, %B; ret <4 x i32> %tmp3 } define <1 x i64> 
@udiv1x64(<1 x i64> %A, <1 x i64> %B) { ; CHECK-LABEL: udiv1x64: ; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} - %tmp3 = udiv <1 x i64> %A, %B; + %tmp3 = udiv nof <1 x i64> %A, %B; ret <1 x i64> %tmp3 } @@ -323,7 +323,7 @@ ; CHECK-LABEL: udiv2x64: ; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} ; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} - %tmp3 = udiv <2 x i64> %A, %B; + %tmp3 = udiv nof <2 x i64> %A, %B; ret <2 x i64> %tmp3 } Index: test/CodeGen/AArch64/div_minsize.ll =================================================================== --- test/CodeGen/AArch64/div_minsize.ll +++ test/CodeGen/AArch64/div_minsize.ll @@ -2,7 +2,7 @@ define i32 @testsize1(i32 %x) minsize nounwind { entry: - %div = sdiv i32 %x, 32 + %div = sdiv nof i32 %x, 32 ret i32 %div ; CHECK-LABEL: testsize1 ; CHECK: sdiv @@ -10,7 +10,7 @@ define i32 @testsize2(i32 %x) minsize nounwind { entry: - %div = sdiv i32 %x, 33 + %div = sdiv nof i32 %x, 33 ret i32 %div ; CHECK-LABEL: testsize2 ; CHECK: sdiv @@ -18,7 +18,7 @@ define i32 @testsize3(i32 %x) minsize nounwind { entry: - %div = udiv i32 %x, 32 + %div = udiv nof i32 %x, 32 ret i32 %div ; CHECK-LABEL: testsize3 ; CHECK: lsr @@ -26,7 +26,7 @@ define i32 @testsize4(i32 %x) minsize nounwind { entry: - %div = udiv i32 %x, 33 + %div = udiv nof i32 %x, 33 ret i32 %div ; CHECK-LABEL: testsize4 ; CHECK: udiv @@ -39,7 +39,7 @@ ; CHECK: usra v0.8h, v1.8h, #11 ; CHECK: sshr v0.8h, v0.8h, #5 ; CHECK: ret - %0 = sdiv <8 x i16> %var, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32> + %0 = sdiv nof <8 x i16> %var, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32> ret <8 x i16> %0 } Index: test/CodeGen/AArch64/divrem.ll =================================================================== --- test/CodeGen/AArch64/divrem.ll +++ test/CodeGen/AArch64/divrem.ll @@ -6,7 +6,7 @@ ; CHECK-LABEL: test_udivrem ; CHECK-DAG: udivrem ; CHECK-NOT: LLVM ERROR: Cannot select - %div = udiv <2 x i32> %x, %y + %div = udiv nof <2 x i32> 
%x, %y store <2 x i32> %div, <2 x i32>* %z %1 = urem <2 x i32> %x, %y ret <2 x i32> %1 @@ -15,7 +15,7 @@ define <4 x i32> @test_sdivrem(<4 x i32> %x, <4 x i32>* %y) { ; CHECK-LABEL: test_sdivrem ; CHECK-DAG: sdivrem - %div = sdiv <4 x i32> %x, < i32 20, i32 20, i32 20, i32 20 > + %div = sdiv nof <4 x i32> %x, < i32 20, i32 20, i32 20, i32 20 > store <4 x i32> %div, <4 x i32>* %y %1 = srem <4 x i32> %x, < i32 20, i32 20, i32 20, i32 20 > ret <4 x i32> %1 Index: test/CodeGen/AArch64/dp2.ll =================================================================== --- test/CodeGen/AArch64/dp2.ll +++ test/CodeGen/AArch64/dp2.ll @@ -52,7 +52,7 @@ ; CHECK-LABEL: udiv_i64: %val0_tmp = load i64, i64* @var64_0 %val1_tmp = load i64, i64* @var64_1 - %val4_tmp = udiv i64 %val0_tmp, %val1_tmp + %val4_tmp = udiv nof i64 %val0_tmp, %val1_tmp ; CHECK: udiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} store volatile i64 %val4_tmp, i64* @var64_0 ret void @@ -62,7 +62,7 @@ ; CHECK-LABEL: sdiv_i64: %val0_tmp = load i64, i64* @var64_0 %val1_tmp = load i64, i64* @var64_1 - %val4_tmp = sdiv i64 %val0_tmp, %val1_tmp + %val4_tmp = sdiv nof i64 %val0_tmp, %val1_tmp ; CHECK: sdiv {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}} store volatile i64 %val4_tmp, i64* @var64_1 ret void @@ -120,7 +120,7 @@ ; CHECK-LABEL: sdiv_i32: %val0_tmp = load i32, i32* @var32_0 %val1_tmp = load i32, i32* @var32_1 - %val4_tmp = sdiv i32 %val0_tmp, %val1_tmp + %val4_tmp = sdiv nof i32 %val0_tmp, %val1_tmp ; CHECK: sdiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} store volatile i32 %val4_tmp, i32* @var32_1 ret void @@ -130,7 +130,7 @@ ; CHECK-LABEL: udiv_i32: %val0_tmp = load i32, i32* @var32_0 %val1_tmp = load i32, i32* @var32_1 - %val4_tmp = udiv i32 %val0_tmp, %val1_tmp + %val4_tmp = udiv nof i32 %val0_tmp, %val1_tmp ; CHECK: udiv {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}} store volatile i32 %val4_tmp, i32* @var32_0 ret void Index: test/CodeGen/AArch64/fast-isel-sdiv.ll =================================================================== --- 
test/CodeGen/AArch64/fast-isel-sdiv.ll +++ test/CodeGen/AArch64/fast-isel-sdiv.ll @@ -4,7 +4,7 @@ define i32 @sdiv_i32_exact(i32 %a) { ; CHECK-LABEL: sdiv_i32_exact ; CHECK: asr {{w[0-9]+}}, w0, #3 - %1 = sdiv exact i32 %a, 8 + %1 = sdiv exact nof i32 %a, 8 ret i32 %1 } @@ -14,7 +14,7 @@ ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csel [[REG2:w[0-9]+]], [[REG1]], w0, lt ; CHECK-NEXT: asr {{w[0-9]+}}, [[REG2]], #3 - %1 = sdiv i32 %a, 8 + %1 = sdiv nof i32 %a, 8 ret i32 %1 } @@ -24,14 +24,14 @@ ; CHECK-NEXT: cmp w0, #0 ; CHECK-NEXT: csel [[REG2:w[0-9]+]], [[REG1]], w0, lt ; CHECK-NEXT: neg {{w[0-9]+}}, [[REG2]], asr #3 - %1 = sdiv i32 %a, -8 + %1 = sdiv nof i32 %a, -8 ret i32 %1 } define i64 @sdiv_i64_exact(i64 %a) { ; CHECK-LABEL: sdiv_i64_exact ; CHECK: asr {{x[0-9]+}}, x0, #4 - %1 = sdiv exact i64 %a, 16 + %1 = sdiv exact nof i64 %a, 16 ret i64 %1 } @@ -41,7 +41,7 @@ ; CHECK-NEXT: cmp x0, #0 ; CHECK-NEXT: csel [[REG2:x[0-9]+]], [[REG1]], x0, lt ; CHECK-NEXT: asr {{x[0-9]+}}, [[REG2]], #4 - %1 = sdiv i64 %a, 16 + %1 = sdiv nof i64 %a, 16 ret i64 %1 } @@ -51,6 +51,6 @@ ; CHECK-NEXT: cmp x0, #0 ; CHECK-NEXT: csel [[REG2:x[0-9]+]], [[REG1]], x0, lt ; CHECK-NEXT: neg {{x[0-9]+}}, [[REG2]], asr #4 - %1 = sdiv i64 %a, -16 + %1 = sdiv nof i64 %a, -16 ret i64 %1 } Index: test/CodeGen/AArch64/rem_crash.ll =================================================================== --- test/CodeGen/AArch64/rem_crash.ll +++ test/CodeGen/AArch64/rem_crash.ll @@ -2,7 +2,7 @@ define i8 @test_minsize_uu8(i8 %x) minsize optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -10,7 +10,7 @@ define i8 @test_minsize_ss8(i8 %x) minsize optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -18,7 +18,7 @@ define i8 @test_minsize_us8(i8 %x) minsize optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ 
-26,7 +26,7 @@ define i8 @test_minsize_su8(i8 %x) minsize optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -34,7 +34,7 @@ define i16 @test_minsize_uu16(i16 %x) minsize optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -42,7 +42,7 @@ define i16 @test_minsize_ss16(i16 %x) minsize optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -50,7 +50,7 @@ define i16 @test_minsize_us16(i16 %x) minsize optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -58,7 +58,7 @@ define i16 @test_minsize_su16(i16 %x) minsize optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -66,7 +66,7 @@ define i32 @test_minsize_uu32(i32 %x) minsize optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -74,7 +74,7 @@ define i32 @test_minsize_ss32(i32 %x) minsize optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -82,7 +82,7 @@ define i32 @test_minsize_us32(i32 %x) minsize optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -90,7 +90,7 @@ define i32 @test_minsize_su32(i32 %x) minsize optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -98,7 +98,7 @@ define i64 @test_minsize_uu64(i64 %x) minsize optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -106,7 +106,7 @@ define i64 @test_minsize_ss64(i64 %x) minsize optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = 
srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -114,7 +114,7 @@ define i64 @test_minsize_us64(i64 %x) minsize optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -122,7 +122,7 @@ define i64 @test_minsize_su64(i64 %x) minsize optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -130,7 +130,7 @@ define i8 @test_uu8(i8 %x) optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -138,7 +138,7 @@ define i8 @test_ss8(i8 %x) optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -146,7 +146,7 @@ define i8 @test_us8(i8 %x) optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -154,7 +154,7 @@ define i8 @test_su8(i8 %x) optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -162,7 +162,7 @@ define i16 @test_uu16(i16 %x) optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -170,7 +170,7 @@ define i16 @test_ss16(i16 %x) optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -178,7 +178,7 @@ define i16 @test_us16(i16 %x) optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -186,7 +186,7 @@ define i16 @test_su16(i16 %x) optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -194,7 +194,7 @@ define i32 @test_uu32(i32 %x) optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -202,7 +202,7 @@ define i32 @test_ss32(i32 
%x) optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -210,7 +210,7 @@ define i32 @test_us32(i32 %x) optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -218,7 +218,7 @@ define i32 @test_su32(i32 %x) optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -226,7 +226,7 @@ define i64 @test_uu64(i64 %x) optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -234,7 +234,7 @@ define i64 @test_ss64(i64 %x) optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -242,7 +242,7 @@ define i64 @test_us64(i64 %x) optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -250,7 +250,7 @@ define i64 @test_su64(i64 %x) optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res Index: test/CodeGen/AArch64/sdivpow2.ll =================================================================== --- test/CodeGen/AArch64/sdivpow2.ll +++ test/CodeGen/AArch64/sdivpow2.ll @@ -7,7 +7,7 @@ ; CHECK: cmp w0, #0 ; CHECK: csel w8, w8, w0, lt ; CHECK: asr w0, w8, #3 - %div = sdiv i32 %x, 8 + %div = sdiv nof i32 %x, 8 ret i32 %div } @@ -17,7 +17,7 @@ ; CHECK: cmp w0, #0 ; CHECK: csel w8, w8, w0, lt ; CHECK: neg w0, w8, asr #3 - %div = sdiv i32 %x, -8 + %div = sdiv nof i32 %x, -8 ret i32 %div } @@ -27,7 +27,7 @@ ; CHECK: cmp w0, #0 ; CHECK: csel w8, w8, w0, lt ; CHECK: asr w0, w8, #5 - %div = sdiv i32 %x, 32 + %div = sdiv nof i32 %x, 32 ret i32 %div } @@ -37,7 +37,7 @@ ; CHECK: cmp x0, #0 ; CHECK: csel x8, x8, x0, lt ; CHECK: asr x0, x8, #3 - %div = sdiv i64 %x, 8 + %div = sdiv nof i64 %x, 8 ret i64 %div } @@ -47,7 +47,7 
@@ ; CHECK: cmp x0, #0 ; CHECK: csel x8, x8, x0, lt ; CHECK: neg x0, x8, asr #3 - %div = sdiv i64 %x, -8 + %div = sdiv nof i64 %x, -8 ret i64 %div } @@ -57,7 +57,7 @@ ; CHECK: cmp x0, #0 ; CHECK: csel x8, x8, x0, lt ; CHECK: asr x0, x8, #6 - %div = sdiv i64 %x, 64 + %div = sdiv nof i64 %x, 64 ret i64 %div } @@ -68,7 +68,7 @@ ; CHECK: cmp x0, #0 ; CHECK: csel x8, x8, x0, lt ; CHECK: asr x0, x8, #48 - %div = sdiv i64 %x, 281474976710656 + %div = sdiv nof i64 %x, 281474976710656 ret i64 %div } Index: test/CodeGen/AMDGPU/lds-oqap-crash.ll =================================================================== --- test/CodeGen/AMDGPU/lds-oqap-crash.ll +++ test/CodeGen/AMDGPU/lds-oqap-crash.ll @@ -14,15 +14,15 @@ entry: %0 = load i32, i32 addrspace(3)* %in ; This block needs to be > 115 ISA instructions to hit the bug, - ; so we'll use udiv instructions. - %div0 = udiv i32 %0, %b - %div1 = udiv i32 %div0, %a - %div2 = udiv i32 %div1, 11 - %div3 = udiv i32 %div2, %a - %div4 = udiv i32 %div3, %b - %div5 = udiv i32 %div4, %c - %div6 = udiv i32 %div5, %div0 - %div7 = udiv i32 %div6, %div1 + ; so we'll use udiv nof instructions. 
+ %div0 = udiv nof i32 %0, %b + %div1 = udiv nof i32 %div0, %a + %div2 = udiv nof i32 %div1, 11 + %div3 = udiv nof i32 %div2, %a + %div4 = udiv nof i32 %div3, %b + %div5 = udiv nof i32 %div4, %c + %div6 = udiv nof i32 %div5, %div0 + %div7 = udiv nof i32 %div6, %div1 store i32 %div7, i32 addrspace(1)* %out ret void } Index: test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll +++ test/CodeGen/AMDGPU/llvm.amdgcn.sbfe.ll @@ -402,7 +402,7 @@ define amdgpu_kernel void @simplify_demanded_bfe_sdiv(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 { %src = load i32, i32 addrspace(1)* %in, align 4 %bfe = call i32 @llvm.amdgcn.sbfe.i32(i32 %src, i32 1, i32 16) - %div = sdiv i32 %bfe, 2 + %div = sdiv nof i32 %bfe, 2 store i32 %div, i32 addrspace(1)* %out, align 4 ret void } Index: test/CodeGen/AMDGPU/r600cfg.ll =================================================================== --- test/CodeGen/AMDGPU/r600cfg.ll +++ test/CodeGen/AMDGPU/r600cfg.ll @@ -72,7 +72,7 @@ br i1 %47, label %IF44, label %ELSE45 IF44: ; preds = %ENDIF40 - %49 = udiv i32 %48, 2 + %49 = udiv nof i32 %48, 2 br label %ENDIF43 ELSE45: ; preds = %ENDIF40 Index: test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll =================================================================== --- test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll +++ test/CodeGen/AMDGPU/schedule-fs-loop-nested.ll @@ -8,7 +8,7 @@ %tmp6 = fptosi float %tmp5 to i32 %tmp7 = bitcast i32 %tmp6 to float %tmp8 = bitcast float %tmp7 to i32 - %tmp9 = sdiv i32 %tmp8, 4 + %tmp9 = sdiv nof i32 %tmp8, 4 %tmp10 = bitcast i32 %tmp9 to float %tmp11 = bitcast float %tmp10 to i32 %tmp12 = mul i32 %tmp11, 4 Index: test/CodeGen/AMDGPU/sdiv.ll =================================================================== --- test/CodeGen/AMDGPU/sdiv.ll +++ test/CodeGen/AMDGPU/sdiv.ll @@ -3,11 +3,11 @@ ; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 
-mattr=-flat-for-global < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s ; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s -; The code generated by sdiv is long and complex and may frequently change. +; The code generated by sdiv nof is long and complex and may frequently change. ; The goal of this test is to make sure the ISel doesn't fail. ; ; This program was previously failing to compile when one of the selectcc -; opcodes generated by the sdiv lowering was being legalized and optimized to: +; opcodes generated by the sdiv nof lowering was being legalized and optimized to: ; selectcc Remainder -1, 0, -1, SETGT ; This was fixed by adding an additional pattern in R600Instructions.td to ; match this pattern with a CNDGE_INT. @@ -18,7 +18,7 @@ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1 %num = load i32, i32 addrspace(1) * %in %den = load i32, i32 addrspace(1) * %den_ptr - %result = sdiv i32 %num, %den + %result = sdiv nof i32 %num, %den store i32 %result, i32 addrspace(1)* %out ret void } @@ -26,7 +26,7 @@ ; FUNC-LABEL: {{^}}sdiv_i32_4: define amdgpu_kernel void @sdiv_i32_4(i32 addrspace(1)* %out, i32 addrspace(1)* %in) { %num = load i32, i32 addrspace(1) * %in - %result = sdiv i32 %num, 4 + %result = sdiv nof i32 %num, 4 store i32 %result, i32 addrspace(1)* %out ret void } @@ -46,7 +46,7 @@ ; SI: s_endpgm define amdgpu_kernel void @slow_sdiv_i32_3435(i32 addrspace(1)* %out, i32 addrspace(1)* %in) { %num = load i32, i32 addrspace(1) * %in - %result = sdiv i32 %num, 3435 + %result = sdiv nof i32 %num, 3435 store i32 %result, i32 addrspace(1)* %out ret void } @@ -55,14 +55,14 @@ %den_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1 %num = load <2 x i32>, <2 x i32> addrspace(1) * %in %den = load <2 x i32>, <2 x i32> addrspace(1) * %den_ptr - %result = sdiv <2 x i32> %num, %den + %result = sdiv nof <2 x i32> %num, %den store <2 x i32> %result, <2 x 
i32> addrspace(1)* %out ret void } define amdgpu_kernel void @sdiv_v2i32_4(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) { %num = load <2 x i32>, <2 x i32> addrspace(1) * %in - %result = sdiv <2 x i32> %num, <i32 4, i32 4> + %result = sdiv nof <2 x i32> %num, <i32 4, i32 4> store <2 x i32> %result, <2 x i32> addrspace(1)* %out ret void } @@ -71,14 +71,14 @@ %den_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1 %num = load <4 x i32>, <4 x i32> addrspace(1) * %in %den = load <4 x i32>, <4 x i32> addrspace(1) * %den_ptr - %result = sdiv <4 x i32> %num, %den + %result = sdiv nof <4 x i32> %num, %den store <4 x i32> %result, <4 x i32> addrspace(1)* %out ret void } define amdgpu_kernel void @sdiv_v4i32_4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) { %num = load <4 x i32>, <4 x i32> addrspace(1) * %in - %result = sdiv <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4> + %result = sdiv nof <4 x i32> %num, <i32 4, i32 4, i32 4, i32 4> store <4 x i32> %result, <4 x i32> addrspace(1)* %out ret void } @@ -91,7 +91,7 @@ %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1 %num = load i8, i8 addrspace(1) * %in %den = load i8, i8 addrspace(1) * %den_ptr - %result = sdiv i8 %num, %den + %result = sdiv nof i8 %num, %den %result.ext = sext i8 %result to i32 store i32 %result.ext, i32 addrspace(1)* %out ret void @@ -105,7 +105,7 @@ %den_ptr = getelementptr i23, i23 addrspace(1)* %in, i23 1 %num = load i23, i23 addrspace(1) * %in %den = load i23, i23 addrspace(1) * %den_ptr - %result = sdiv i23 %num, %den + %result = sdiv nof i23 %num, %den %result.ext = sext i23 %result to i32 store i32 %result.ext, i32 addrspace(1)* %out ret void @@ -119,7 +119,7 @@ %den_ptr = getelementptr i24, i24 addrspace(1)* %in, i24 1 %num = load i24, i24 addrspace(1) * %in %den = load i24, i24 addrspace(1) * %den_ptr - %result = sdiv i24 %num, %den + %result = sdiv nof i24 %num, %den %result.ext = sext i24 %result to i32 store i32 %result.ext, i32 addrspace(1)* %out 
ret void @@ -131,7 +131,7 @@ %den_ptr = getelementptr i25, i25 addrspace(1)* %in, i25 1 %num = load i25, i25 addrspace(1) * %in %den = load i25, i25 addrspace(1) * %den_ptr - %result = sdiv i25 %num, %den + %result = sdiv nof i25 %num, %den %result.ext = sext i25 %result to i32 store i32 %result.ext, i32 addrspace(1)* %out ret void @@ -139,7 +139,7 @@ ; Tests for 64-bit divide bypass. ; define amdgpu_kernel void @test_get_quotient(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind { -; %result = sdiv i64 %a, %b +; %result = sdiv nof i64 %a, %b ; store i64 %result, i64 addrspace(1)* %out, align 8 ; ret void ; } @@ -151,7 +151,7 @@ ; } ; define amdgpu_kernel void @test_get_quotient_and_remainder(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind { -; %resultdiv = sdiv i64 %a, %b +; %resultdiv = sdiv nof i64 %a, %b ; %resultrem = srem i64 %a, %b ; %result = add i64 %resultdiv, %resultrem ; store i64 %result, i64 addrspace(1)* %out, align 8 @@ -166,7 +166,7 @@ define amdgpu_kernel void @scalarize_mulhs_4xi32(<4 x i32> addrspace(1)* nocapture readonly %in, <4 x i32> addrspace(1)* nocapture %out) { %1 = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16 - %2 = sdiv <4 x i32> %1, <i32 53668, i32 53668, i32 53668, i32 53668> + %2 = sdiv nof <4 x i32> %1, <i32 53668, i32 53668, i32 53668, i32 53668> store <4 x i32> %2, <4 x i32> addrspace(1)* %out, align 16 ret void } Index: test/CodeGen/AMDGPU/sdivrem24.ll =================================================================== --- test/CodeGen/AMDGPU/sdivrem24.ll +++ test/CodeGen/AMDGPU/sdivrem24.ll @@ -16,7 +16,7 @@ %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1 %num = load i8, i8 addrspace(1) * %in %den = load i8, i8 addrspace(1) * %den_ptr - %result = sdiv i8 %num, %den + %result = sdiv nof i8 %num, %den store i8 %result, i8 addrspace(1)* %out ret void } @@ -35,7 +35,7 @@ %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1 %num = load i16, i16 addrspace(1) * %in, align 2 %den = load i16, i16 addrspace(1) * 
%den_ptr, align 2 - %result = sdiv i16 %num, %den + %result = sdiv nof i16 %num, %den store i16 %result, i16 addrspace(1)* %out, align 2 ret void } @@ -58,7 +58,7 @@ %den.i24.0 = shl i32 %den, 8 %num.i24 = ashr i32 %num.i24.0, 8 %den.i24 = ashr i32 %den.i24.0, 8 - %result = sdiv i32 %num.i24, %den.i24 + %result = sdiv nof i32 %num.i24, %den.i24 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -77,7 +77,7 @@ %den.i24.0 = shl i32 %den, 7 %num.i24 = ashr i32 %num.i24.0, 7 %den.i24 = ashr i32 %den.i24.0, 7 - %result = sdiv i32 %num.i24, %den.i24 + %result = sdiv nof i32 %num.i24, %den.i24 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -96,7 +96,7 @@ %den.i24.0 = shl i32 %den, 7 %num.i24 = ashr i32 %num.i24.0, 8 %den.i24 = ashr i32 %den.i24.0, 7 - %result = sdiv i32 %num.i24, %den.i24 + %result = sdiv nof i32 %num.i24, %den.i24 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -115,7 +115,7 @@ %den.i24.0 = shl i32 %den, 8 %num.i24 = ashr i32 %num.i24.0, 7 %den.i24 = ashr i32 %den.i24.0, 8 - %result = sdiv i32 %num.i24, %den.i24 + %result = sdiv nof i32 %num.i24, %den.i24 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -214,7 +214,7 @@ %den.i25.0 = shl i32 %den, 7 %num.i24 = ashr i32 %num.i24.0, 8 %den.i25 = ashr i32 %den.i25.0, 7 - %result = sdiv i32 %num.i24, %den.i25 + %result = sdiv nof i32 %num.i24, %den.i25 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -233,7 +233,7 @@ %den.i24.0 = shl i32 %den, 8 %num.i25 = ashr i32 %num.i25.0, 7 %den.i24 = ashr i32 %den.i24.0, 8 - %result = sdiv i32 %num.i25, %den.i24 + %result = sdiv nof i32 %num.i25, %den.i24 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -331,7 +331,7 @@ %den.i12.0 = shl i32 %den, 20 %num.i17 = ashr i32 %num.i17.0, 15 %den.i12 = ashr i32 %den.i12.0, 20 - %result = sdiv i32 %num.i17, %den.i12 + %result = sdiv nof i32 %num.i17, %den.i12 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } Index: 
test/CodeGen/AMDGPU/sdivrem64.ll =================================================================== --- test/CodeGen/AMDGPU/sdivrem64.ll +++ test/CodeGen/AMDGPU/sdivrem64.ll @@ -44,7 +44,7 @@ ;GCN: v_mac_f32_e32 v{{[0-9]+}}, 0xcf800000 ;GCN: s_endpgm define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) { - %result = sdiv i64 %x, %y + %result = sdiv nof i64 %x, %y store i64 %result, i64 addrspace(1)* %out ret void } @@ -108,7 +108,7 @@ define amdgpu_kernel void @test_sdiv3264(i64 addrspace(1)* %out, i64 %x, i64 %y) { %1 = ashr i64 %x, 33 %2 = ashr i64 %y, 33 - %result = sdiv i64 %1, %2 + %result = sdiv nof i64 %1, %2 store i64 %result, i64 addrspace(1)* %out ret void } @@ -145,7 +145,7 @@ define amdgpu_kernel void @test_sdiv2464(i64 addrspace(1)* %out, i64 %x, i64 %y) { %1 = ashr i64 %x, 40 %2 = ashr i64 %y, 40 - %result = sdiv i64 %1, %2 + %result = sdiv nof i64 %1, %2 store i64 %result, i64 addrspace(1)* %out ret void } Index: test/CodeGen/AMDGPU/sgprcopies.ll =================================================================== --- test/CodeGen/AMDGPU/sgprcopies.ll +++ test/CodeGen/AMDGPU/sgprcopies.ll @@ -7,7 +7,7 @@ entry: %conv = call i32 @llvm.amdgcn.workitem.id.x() #1 %rem = urem i32 %conv, %width - %div = udiv i32 %conv, %width + %div = udiv nof i32 %conv, %width %conv1 = sitofp i32 %rem to float %x = tail call float @llvm.fmuladd.f32(float %xStep, float %conv1, float %xPos) %conv2 = sitofp i32 %div to float Index: test/CodeGen/AMDGPU/structurize.ll =================================================================== --- test/CodeGen/AMDGPU/structurize.ll +++ test/CodeGen/AMDGPU/structurize.ll @@ -65,15 +65,15 @@ diamond_true: %4 = phi i32 [%2, %branch_from], [%a, %diamond_head] ; This block needs to be > 100 ISA instructions to hit the bug, - ; so we'll use udiv instructions. 
- %div0 = udiv i32 %a, %b - %div1 = udiv i32 %div0, %4 - %div2 = udiv i32 %div1, 11 - %div3 = udiv i32 %div2, %a - %div4 = udiv i32 %div3, %b - %div5 = udiv i32 %div4, %c - %div6 = udiv i32 %div5, %div0 - %div7 = udiv i32 %div6, %div1 + ; so we'll use udiv nof instructions. + %div0 = udiv nof i32 %a, %b + %div1 = udiv nof i32 %div0, %4 + %div2 = udiv nof i32 %div1, 11 + %div3 = udiv nof i32 %div2, %a + %div4 = udiv nof i32 %div3, %b + %div5 = udiv nof i32 %div4, %c + %div6 = udiv nof i32 %div5, %div0 + %div7 = udiv nof i32 %div6, %div1 br label %done done: Index: test/CodeGen/AMDGPU/udiv.ll =================================================================== --- test/CodeGen/AMDGPU/udiv.ll +++ test/CodeGen/AMDGPU/udiv.ll @@ -14,7 +14,7 @@ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1 %a = load i32, i32 addrspace(1)* %in %b = load i32, i32 addrspace(1)* %b_ptr - %result = udiv i32 %a, %b + %result = udiv nof i32 %a, %b store i32 %result, i32 addrspace(1)* %out ret void } @@ -22,13 +22,13 @@ ; FUNC-LABEL: {{^}}s_udiv_i32: ; SI: v_rcp_iflag_f32_e32 define amdgpu_kernel void @s_udiv_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) { - %result = udiv i32 %a, %b + %result = udiv nof i32 %a, %b store i32 %result, i32 addrspace(1)* %out ret void } -; The code generated by udiv is long and complex and may frequently +; The code generated by udiv nof is long and complex and may frequently ; change. 
The goal of this test is to make sure the ISel doesn't fail ; when it gets a v4i32 udiv @@ -42,7 +42,7 @@ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1 %a = load <2 x i32>, <2 x i32> addrspace(1) * %in %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr - %result = udiv <2 x i32> %a, %b + %result = udiv nof <2 x i32> %a, %b store <2 x i32> %result, <2 x i32> addrspace(1)* %out ret void } @@ -54,7 +54,7 @@ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1 %a = load <4 x i32>, <4 x i32> addrspace(1) * %in %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr - %result = udiv <4 x i32> %a, %b + %result = udiv nof <4 x i32> %a, %b store <4 x i32> %result, <4 x i32> addrspace(1)* %out ret void } @@ -66,7 +66,7 @@ define amdgpu_kernel void @udiv_i32_div_pow2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) { %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1 %a = load i32, i32 addrspace(1)* %in - %result = udiv i32 %a, 16 + %result = udiv nof i32 %a, 16 store i32 %result, i32 addrspace(1)* %out ret void } @@ -80,7 +80,7 @@ define amdgpu_kernel void @udiv_i32_div_k_even(i32 addrspace(1)* %out, i32 addrspace(1)* %in) { %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1 %a = load i32, i32 addrspace(1)* %in - %result = udiv i32 %a, 34259182 + %result = udiv nof i32 %a, 34259182 store i32 %result, i32 addrspace(1)* %out ret void } @@ -94,7 +94,7 @@ define amdgpu_kernel void @udiv_i32_div_k_odd(i32 addrspace(1)* %out, i32 addrspace(1)* %in) { %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1 %a = load i32, i32 addrspace(1)* %in - %result = udiv i32 %a, 34259183 + %result = udiv nof i32 %a, 34259183 store i32 %result, i32 addrspace(1)* %out ret void } @@ -107,7 +107,7 @@ %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1 %num = load i8, i8 addrspace(1) * %in %den = load i8, i8 addrspace(1) * %den_ptr - %result = udiv i8 %num, %den + %result = udiv nof i8 %num, %den %result.ext = zext i8 %result to i32 store i32 
%result.ext, i32 addrspace(1)* %out ret void @@ -121,7 +121,7 @@ %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1 %num = load i16, i16 addrspace(1) * %in %den = load i16, i16 addrspace(1) * %den_ptr - %result = udiv i16 %num, %den + %result = udiv nof i16 %num, %den %result.ext = zext i16 %result to i32 store i32 %result.ext, i32 addrspace(1)* %out ret void @@ -135,7 +135,7 @@ %den_ptr = getelementptr i23, i23 addrspace(1)* %in, i23 1 %num = load i23, i23 addrspace(1) * %in %den = load i23, i23 addrspace(1) * %den_ptr - %result = udiv i23 %num, %den + %result = udiv nof i23 %num, %den %result.ext = zext i23 %result to i32 store i32 %result.ext, i32 addrspace(1)* %out ret void @@ -147,7 +147,7 @@ %den_ptr = getelementptr i24, i24 addrspace(1)* %in, i24 1 %num = load i24, i24 addrspace(1) * %in %den = load i24, i24 addrspace(1) * %den_ptr - %result = udiv i24 %num, %den + %result = udiv nof i24 %num, %den %result.ext = zext i24 %result to i32 store i32 %result.ext, i32 addrspace(1)* %out ret void @@ -161,7 +161,7 @@ define amdgpu_kernel void @scalarize_mulhu_4xi32(<4 x i32> addrspace(1)* nocapture readonly %in, <4 x i32> addrspace(1)* nocapture %out) { %1 = load <4 x i32>, <4 x i32> addrspace(1)* %in, align 16 - %2 = udiv <4 x i32> %1, <i32 53668, i32 53668, i32 53668, i32 53668> + %2 = udiv nof <4 x i32> %1, <i32 53668, i32 53668, i32 53668, i32 53668> store <4 x i32> %2, <4 x i32> addrspace(1)* %out, align 16 ret void } @@ -169,7 +169,7 @@ ; FUNC-LABEL: {{^}}test_udiv2: ; SI: s_lshr_b32 s{{[0-9]}}, s{{[0-9]}}, 1 define amdgpu_kernel void @test_udiv2(i32 %p) { - %i = udiv i32 %p, 2 + %i = udiv nof i32 %p, 2 store volatile i32 %i, i32 addrspace(1)* undef ret void } @@ -179,7 +179,7 @@ ; SI: v_mul_hi_u32 v0, {{s[0-9]+}}, {{v[0-9]+}} ; SI-NEXT: v_lshrrev_b32_e32 v0, 1, v0 define amdgpu_kernel void @test_udiv_3_mulhu(i32 %p) { - %i = udiv i32 %p, 3 + %i = udiv nof i32 %p, 3 store volatile i32 %i, i32 addrspace(1)* undef ret void } @@ -193,7 +193,7 @@ %tmp2 = 
getelementptr inbounds i8, i8 addrspace(1)* %arg, i64 undef %tmp3 = load i8, i8 addrspace(1)* %tmp2, align 1 %tmp4 = sext i8 %tmp3 to i32 - %tmp5 = sdiv i32 %tmp1, %tmp4 + %tmp5 = sdiv nof i32 %tmp1, %tmp4 %tmp6 = trunc i32 %tmp5 to i8 store i8 %tmp6, i8 addrspace(1)* null, align 1 ret void Index: test/CodeGen/AMDGPU/udivrem.ll =================================================================== --- test/CodeGen/AMDGPU/udivrem.ll +++ test/CodeGen/AMDGPU/udivrem.ll @@ -52,7 +52,7 @@ ; SI-DAG: v_cndmask_b32_e64 ; SI: s_endpgm define amdgpu_kernel void @test_udivrem(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 %x, i32 %y) { - %result0 = udiv i32 %x, %y + %result0 = udiv nof i32 %x, %y store i32 %result0, i32 addrspace(1)* %out0 %result1 = urem i32 %x, %y store i32 %result1, i32 addrspace(1)* %out1 @@ -159,7 +159,7 @@ ; SI-DAG: v_cndmask_b32_e64 ; SI: s_endpgm define amdgpu_kernel void @test_udivrem_v2(<2 x i32> addrspace(1)* %out, <2 x i32> %x, <2 x i32> %y) { - %result0 = udiv <2 x i32> %x, %y + %result0 = udiv nof <2 x i32> %x, %y store <2 x i32> %result0, <2 x i32> addrspace(1)* %out %result1 = urem <2 x i32> %x, %y store <2 x i32> %result1, <2 x i32> addrspace(1)* %out @@ -341,7 +341,7 @@ ; SI-DAG: v_cndmask_b32_e64 ; SI: s_endpgm define amdgpu_kernel void @test_udivrem_v4(<4 x i32> addrspace(1)* %out, <4 x i32> %x, <4 x i32> %y) { - %result0 = udiv <4 x i32> %x, %y + %result0 = udiv nof <4 x i32> %x, %y store <4 x i32> %result0, <4 x i32> addrspace(1)* %out %result1 = urem <4 x i32> %x, %y store <4 x i32> %result1, <4 x i32> addrspace(1)* %out Index: test/CodeGen/AMDGPU/udivrem24.ll =================================================================== --- test/CodeGen/AMDGPU/udivrem24.ll +++ test/CodeGen/AMDGPU/udivrem24.ll @@ -16,7 +16,7 @@ %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1 %num = load i8, i8 addrspace(1) * %in %den = load i8, i8 addrspace(1) * %den_ptr - %result = udiv i8 %num, %den + %result = udiv nof i8 %num, %den store i8 
%result, i8 addrspace(1)* %out ret void } @@ -35,7 +35,7 @@ %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1 %num = load i16, i16 addrspace(1) * %in, align 2 %den = load i16, i16 addrspace(1) * %den_ptr, align 2 - %result = udiv i16 %num, %den + %result = udiv nof i16 %num, %den store i16 %result, i16 addrspace(1)* %out, align 2 ret void } @@ -58,7 +58,7 @@ %den.i23.0 = shl i32 %den, 9 %num.i23 = lshr i32 %num.i23.0, 9 %den.i23 = lshr i32 %den.i23.0, 9 - %result = udiv i32 %num.i23, %den.i23 + %result = udiv nof i32 %num.i23, %den.i23 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -75,7 +75,7 @@ %den.i24.0 = shl i32 %den, 8 %num.i24 = lshr i32 %num.i24.0, 8 %den.i24 = lshr i32 %den.i24.0, 8 - %result = udiv i32 %num.i24, %den.i24 + %result = udiv nof i32 %num.i24, %den.i24 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -92,7 +92,7 @@ %den.i24.0 = shl i32 %den, 8 %num.i23 = lshr i32 %num.i23.0, 9 %den.i24 = lshr i32 %den.i24.0, 8 - %result = udiv i32 %num.i23, %den.i24 + %result = udiv nof i32 %num.i23, %den.i24 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -109,7 +109,7 @@ %den.i23.0 = shl i32 %den, 9 %num.i24 = lshr i32 %num.i24.0, 8 %den.i23 = lshr i32 %den.i23.0, 9 - %result = udiv i32 %num.i24, %den.i23 + %result = udiv nof i32 %num.i24, %den.i23 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -129,7 +129,7 @@ %den.i25.0 = shl i32 %den, 7 %num.i25 = lshr i32 %num.i25.0, 7 %den.i25 = lshr i32 %den.i25.0, 7 - %result = udiv i32 %num.i25, %den.i25 + %result = udiv nof i32 %num.i25, %den.i25 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -149,7 +149,7 @@ %den.i24.0 = shl i32 %den, 7 %num.i24 = lshr i32 %num.i24.0, 8 %den.i24 = lshr i32 %den.i24.0, 7 - %result = udiv i32 %num.i24, %den.i24 + %result = udiv nof i32 %num.i24, %den.i24 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -169,7 +169,7 @@ %den.i24.0 = shl i32 %den, 8 %num.i24 = lshr i32 
%num.i24.0, 7 %den.i24 = lshr i32 %den.i24.0, 8 - %result = udiv i32 %num.i24, %den.i24 + %result = udiv nof i32 %num.i24, %den.i24 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -302,7 +302,7 @@ %den.i23.0 = shl i32 %den, 9 %num.i16 = lshr i32 %num.i16.0, 16 %den.i23 = lshr i32 %den.i23.0, 9 - %result = udiv i32 %num.i16, %den.i23 + %result = udiv nof i32 %num.i16, %den.i23 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } @@ -321,7 +321,7 @@ %den.i16.0 = shl i32 %den, 16 %num.i23 = lshr i32 %num.i23.0, 9 %den.i16 = lshr i32 %den.i16.0, 16 - %result = udiv i32 %num.i23, %den.i16 + %result = udiv nof i32 %num.i23, %den.i16 store i32 %result, i32 addrspace(1)* %out, align 4 ret void } Index: test/CodeGen/AMDGPU/udivrem64.ll =================================================================== --- test/CodeGen/AMDGPU/udivrem64.ll +++ test/CodeGen/AMDGPU/udivrem64.ll @@ -45,7 +45,7 @@ ;GCN: v_mac_f32_e32 v{{[0-9]+}}, 0xcf800000 ;GCN: s_endpgm define amdgpu_kernel void @test_udiv(i64 addrspace(1)* %out, i64 %x, i64 %y) { - %result = udiv i64 %x, %y + %result = udiv nof i64 %x, %y store i64 %result, i64 addrspace(1)* %out ret void } @@ -109,7 +109,7 @@ define amdgpu_kernel void @test_udiv3264(i64 addrspace(1)* %out, i64 %x, i64 %y) { %1 = lshr i64 %x, 33 %2 = lshr i64 %y, 33 - %result = udiv i64 %1, %2 + %result = udiv nof i64 %1, %2 store i64 %result, i64 addrspace(1)* %out ret void } @@ -145,7 +145,7 @@ define amdgpu_kernel void @test_udiv2364(i64 addrspace(1)* %out, i64 %x, i64 %y) { %1 = lshr i64 %x, 41 %2 = lshr i64 %y, 41 - %result = udiv i64 %1, %2 + %result = udiv nof i64 %1, %2 store i64 %result, i64 addrspace(1)* %out ret void } Index: test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll =================================================================== --- test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll +++ test/CodeGen/ARM/2007-05-14-RegScavengerAssert.ll @@ -19,7 +19,7 @@ br i1 false, label %bb59, label %bb bb59: ; preds = %bb - 
%tmp68 = sdiv i64 0, 0 ; <i64> [#uses=1] + %tmp68 = sdiv nof i64 0, 0 ; <i64> [#uses=1] %tmp6869 = trunc i64 %tmp68 to i32 ; <i32> [#uses=2] %tmp81 = call i32 asm "smull $0, $1, $2, $3 \0A\09mov $0, $0, lsr $4\0A\09add $1, $0, $1, lsl $5\0A\09", "=&r,=*&r,r,r,i,i"( i32* null, i32 %tmp6869, i32 13316085, i32 23, i32 9 ) ; <i32> [#uses=0] %tmp90 = call i32 asm "smull $0, $1, $2, $3 \0A\09mov $0, $0, lsr $4\0A\09add $1, $0, $1, lsl $5\0A\09", "=&r,=*&r,r,r,i,i"( i32* null, i32 %tmp6869, i32 10568984, i32 23, i32 9 ) ; <i32> [#uses=0] Index: test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll =================================================================== --- test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll +++ test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll @@ -41,13 +41,13 @@ unreachable bb13: ; preds = %bb11 - %iftmp.40.0.neg = sdiv i32 0, -2 ; <i32> [#uses=2] + %iftmp.40.0.neg = sdiv nof i32 0, -2 ; <i32> [#uses=2] %12 = sub nsw i32 0, %9 ; <i32> [#uses=1] %13 = sitofp i32 %12 to double ; <double> [#uses=1] %14 = fdiv double %13, 0.000000e+00 ; <double> [#uses=1] %15 = fptosi double %14 to i32 ; <i32> [#uses=1] %iftmp.41.0.in = add i32 0, %15 ; <i32> [#uses=1] - %iftmp.41.0.neg = sdiv i32 %iftmp.41.0.in, -2 ; <i32> [#uses=3] + %iftmp.41.0.neg = sdiv nof i32 %iftmp.41.0.in, -2 ; <i32> [#uses=3] br i1 undef, label %bb43.loopexit, label %bb21 bb21: ; preds = %bb13 Index: test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll =================================================================== --- test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll +++ test/CodeGen/ARM/2011-02-04-AntidepMultidef.ll @@ -28,7 +28,7 @@ ; CHECK: umull [[REGISTER:lr|r[0-9]+]], ; CHECK-NOT: [[REGISTER]], ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}} - %3 = udiv i32 %2, 10 + %3 = udiv nof i32 %2, 10 %4 = urem i32 %3, 10 %5 = icmp ult i32 %4, 10 %6 = trunc i32 %4 to i8 @@ -42,7 +42,7 @@ ; CHECK: umull [[REGISTER:lr|r[0-9]+]], ; CHECK-NOT: [[REGISTER]], ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}} - %9 = udiv i32 %2, 100 
+ %9 = udiv nof i32 %2, 100 %10 = urem i32 %9, 10 %11 = icmp ult i32 %10, 10 %12 = trunc i32 %10 to i8 @@ -56,7 +56,7 @@ ; CHECK: umull [[REGISTER:lr|r[0-9]+]], ; CHECK-NOT: [[REGISTER]], ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}} - %15 = udiv i32 %2, 10000 + %15 = udiv nof i32 %2, 10000 %16 = urem i32 %15, 10 %17 = icmp ult i32 %16, 10 %18 = trunc i32 %16 to i8 @@ -70,7 +70,7 @@ ; CHECK: umull [[REGISTER:lr|r[0-9]+]], ; CHECK-NOT: [[REGISTER]], ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}} - %21 = udiv i32 %2, 100000 + %21 = udiv nof i32 %2, 100000 %22 = urem i32 %21, 10 %23 = icmp ult i32 %22, 10 %iftmp.5.0.5 = select i1 %23, i8 0, i8 %val8 @@ -81,7 +81,7 @@ ; CHECK: umull [[REGISTER:lr|r[0-9]+]], ; CHECK-NOT: [[REGISTER]], ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}} - %24 = udiv i32 %2, 1000000 + %24 = udiv nof i32 %2, 1000000 %25 = urem i32 %24, 10 %26 = icmp ult i32 %25, 10 %27 = trunc i32 %25 to i8 @@ -95,7 +95,7 @@ ; CHECK: umull [[REGISTER:lr|r[0-9]+]], ; CHECK-NOT: [[REGISTER]], ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}} - %30 = udiv i32 %2, 10000000 + %30 = udiv nof i32 %2, 10000000 %31 = urem i32 %30, 10 %32 = icmp ult i32 %31, 10 %33 = trunc i32 %31 to i8 @@ -109,7 +109,7 @@ ; CHECK: umull [[REGISTER:lr|r[0-9]+]], ; CHECK-NOT: [[REGISTER]], ; CHECK: {{lr|r[0-9]+}}, {{lr|r[0-9]+$}} - %36 = udiv i32 %2, 100000000 + %36 = udiv nof i32 %2, 100000000 %37 = urem i32 %36, 10 %38 = icmp ult i32 %37, 10 %39 = trunc i32 %37 to i8 Index: test/CodeGen/ARM/2011-08-29-SchedCycle.ll =================================================================== --- test/CodeGen/ARM/2011-08-29-SchedCycle.ll +++ test/CodeGen/ARM/2011-08-29-SchedCycle.ll @@ -33,7 +33,7 @@ define void @t() nounwind { entry: %tmp = load i64, i64* undef, align 4 - %tmp5 = udiv i64 %tmp, 30 + %tmp5 = udiv nof i64 %tmp, 30 %tmp13 = and i64 %tmp5, 64739244643450880 %tmp16 = sub i64 0, %tmp13 %tmp19 = and i64 %tmp16, 63 Index: test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll 
=================================================================== --- test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll +++ test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll @@ -11,7 +11,7 @@ br label %bb17 bb25.lr.ph: ; preds = %entry - %0 = sdiv i32 undef, 2 + %0 = sdiv nof i32 undef, 2 br label %bb5.i bb.i: ; preds = %bb5.i Index: test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll =================================================================== --- test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll +++ test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll @@ -13,11 +13,11 @@ define void @f() { %1 = load <3 x i16>, <3 x i16>* @x1 %2 = load <3 x i16>, <3 x i16>* @y1 - %3 = sdiv <3 x i16> %1, %2 + %3 = sdiv nof <3 x i16> %1, %2 store <3 x i16> %3, <3 x i16>* @z1 %4 = load <4 x i16>, <4 x i16>* @x2 %5 = load <4 x i16>, <4 x i16>* @y2 - %6 = sdiv <4 x i16> %4, %5 + %6 = sdiv nof <4 x i16> %4, %5 store <4 x i16> %6, <4 x i16>* @z2 ret void } Index: test/CodeGen/ARM/2012-05-04-vmov.ll =================================================================== --- test/CodeGen/ARM/2012-05-04-vmov.ll +++ test/CodeGen/ARM/2012-05-04-vmov.ll @@ -8,7 +8,7 @@ define <2 x i32> @testuvec(<2 x i32> %A, <2 x i32> %B) nounwind { entry: - %div = udiv <2 x i32> %A, %B + %div = udiv nof <2 x i32> %A, %B ret <2 x i32> %div ; A9-CHECK: vmov.32 ; vmov.32 should not be used to get a lane: Index: test/CodeGen/ARM/GlobalISel/arm-isel-divmod.ll =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-isel-divmod.ll +++ test/CodeGen/ARM/GlobalISel/arm-isel-divmod.ll @@ -8,7 +8,7 @@ ; HWDIV: sdiv ; SOFT-AEABI: bl __aeabi_idiv ; SOFT-DEFAULT: bl __divsi3 - %r = sdiv i32 %a, %b + %r = sdiv nof i32 %a, %b ret i32 %r } @@ -17,7 +17,7 @@ ; HWDIV: udiv ; SOFT-AEABI: bl __aeabi_uidiv ; SOFT-DEFAULT: bl __udivsi3 - %r = udiv i32 %a, %b + %r = udiv nof i32 %a, %b ret i32 %r } @@ -26,7 +26,7 @@ ; HWDIV: sdiv ; SOFT-AEABI: bl __aeabi_idiv ; SOFT-DEFAULT: bl __divsi3 - %r = sdiv i16 
%a, %b + %r = sdiv nof i16 %a, %b ret i16 %r } @@ -35,7 +35,7 @@ ; HWDIV: udiv ; SOFT-AEABI: bl __aeabi_uidiv ; SOFT-DEFAULT: bl __udivsi3 - %r = udiv i16 %a, %b + %r = udiv nof i16 %a, %b ret i16 %r } @@ -44,7 +44,7 @@ ; HWDIV: sdiv ; SOFT-AEABI: bl __aeabi_idiv ; SOFT-DEFAULT: bl __divsi3 - %r = sdiv i8 %a, %b + %r = sdiv nof i8 %a, %b ret i8 %r } @@ -53,7 +53,7 @@ ; HWDIV: udiv ; SOFT-AEABI: bl __aeabi_uidiv ; SOFT-DEFAULT: bl __udivsi3 - %r = udiv i8 %a, %b + %r = udiv nof i8 %a, %b ret i8 %r } Index: test/CodeGen/ARM/Windows/dbzchk.ll =================================================================== --- test/CodeGen/ARM/Windows/dbzchk.ll +++ test/CodeGen/ARM/Windows/dbzchk.ll @@ -15,7 +15,7 @@ store i32 %d, i32* %d.addr, align 4 %0 = load i32, i32* %n.addr, align 4 %1 = load i32, i32* %d.addr, align 4 - %div = sdiv i32 %0, %1 + %div = sdiv nof i32 %0, %1 %tobool = icmp ne i32 %div, 0 br i1 %tobool, label %if.then, label %if.end Index: test/CodeGen/ARM/Windows/division-range.ll =================================================================== --- test/CodeGen/ARM/Windows/division-range.ll +++ test/CodeGen/ARM/Windows/division-range.ll @@ -4,7 +4,7 @@ define arm_aapcs_vfpcc i32 @f(i32 %n, i32 %d) local_unnamed_addr { entry: - %div = sdiv i32 %n, %d + %div = sdiv nof i32 %n, %d call i32 @llvm.arm.space(i32 128, i32 undef) ret i32 %div } Index: test/CodeGen/ARM/Windows/division.ll =================================================================== --- test/CodeGen/ARM/Windows/division.ll +++ test/CodeGen/ARM/Windows/division.ll @@ -3,7 +3,7 @@ define arm_aapcs_vfpcc i32 @sdiv32(i32 %divisor, i32 %divident) { entry: - %div = sdiv i32 %divident, %divisor + %div = sdiv nof i32 %divident, %divisor ret i32 %div } @@ -14,7 +14,7 @@ define arm_aapcs_vfpcc i32 @udiv32(i32 %divisor, i32 %divident) { entry: - %div = udiv i32 %divident, %divisor + %div = udiv nof i32 %divident, %divisor ret i32 %div } @@ -25,7 +25,7 @@ define arm_aapcs_vfpcc i64 @sdiv64(i64 %divisor, i64 
%divident) { entry: - %div = sdiv i64 %divident, %divisor + %div = sdiv nof i64 %divident, %divisor ret i64 %div } @@ -37,7 +37,7 @@ define arm_aapcs_vfpcc i64 @udiv64(i64 %divisor, i64 %divident) { entry: - %div = udiv i64 %divident, %divisor + %div = udiv nof i64 %divident, %divisor ret i64 %div } Index: test/CodeGen/ARM/Windows/no-aeabi.ll =================================================================== --- test/CodeGen/ARM/Windows/no-aeabi.ll +++ test/CodeGen/ARM/Windows/no-aeabi.ll @@ -24,7 +24,7 @@ define i32 @divide(i32 %i, i32 %j) nounwind { entry: - %quotient = sdiv i32 %i, %j + %quotient = sdiv nof i32 %i, %j ret i32 %quotient } Index: test/CodeGen/ARM/adv-copy-opt.ll =================================================================== --- test/CodeGen/ARM/adv-copy-opt.ll +++ test/CodeGen/ARM/adv-copy-opt.ll @@ -33,6 +33,6 @@ ; OPT-NEXT: bx lr define <2 x i32> @simpleVectorDiv(<2 x i32> %A, <2 x i32> %B) nounwind { entry: - %div = udiv <2 x i32> %A, %B + %div = udiv nof <2 x i32> %A, %B ret <2 x i32> %div } Index: test/CodeGen/ARM/call-tc.ll =================================================================== --- test/CodeGen/ARM/call-tc.ll +++ test/CodeGen/ARM/call-tc.ll @@ -72,7 +72,7 @@ ; CHECKV6: b ___divsi3 ; CHECKELF-LABEL: t6: ; CHECKELF: b __aeabi_idiv - %0 = sdiv i32 %a, %b + %0 = sdiv nof i32 %a, %b ret i32 %0 } @@ -156,7 +156,7 @@ %lock = alloca %class.MutexLock, align 1 %1 = call %class.MutexLock* @_ZN9MutexLockC1Ev(%class.MutexLock* %lock) %2 = load i32, i32* @x, align 4 - %3 = sdiv i32 1000, %2 + %3 = sdiv nof i32 1000, %2 %4 = call %class.MutexLock* @_ZN9MutexLockD1Ev(%class.MutexLock* %lock) ret i32 %3 } Index: test/CodeGen/ARM/cortex-a57-misched-basic.ll =================================================================== --- test/CodeGen/ARM/cortex-a57-misched-basic.ll +++ test/CodeGen/ARM/cortex-a57-misched-basic.ll @@ -46,7 +46,7 @@ %xor = xor i32 %c, %b %ld = load i32, i32* %d %add = add nsw i32 %xor, %ld - %div = sdiv i32 %a, %b + 
%div = sdiv nof i32 %a, %b %sub = sub i32 %div, %add ret i32 %sub } Index: test/CodeGen/ARM/cortexr52-misched-basic.ll =================================================================== --- test/CodeGen/ARM/cortexr52-misched-basic.ll +++ test/CodeGen/ARM/cortexr52-misched-basic.ll @@ -33,7 +33,7 @@ %xor = xor i32 %c, %b %mul = mul nsw i32 %xor, %c %add = add nsw i32 %mul, %a - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b %sub = sub i32 %add, %div ret i32 %sub } Index: test/CodeGen/ARM/data-in-code-annotations.ll =================================================================== --- test/CodeGen/ARM/data-in-code-annotations.ll +++ test/CodeGen/ARM/data-in-code-annotations.ll @@ -33,7 +33,7 @@ br label %return sw.bb20: ; preds = %entry - %div = sdiv i32 undef, undef + %div = sdiv nof i32 undef, undef br label %return return: ; preds = %sw.bb20, %sw.bb13, %sw.bb6, %sw.bb, %entry Index: test/CodeGen/ARM/div.ll =================================================================== --- test/CodeGen/ARM/div.ll +++ test/CodeGen/ARM/div.ll @@ -25,7 +25,7 @@ ; CHECK-HWDIV: sdiv ; CHECK-EABI: __aeabi_idiv - %tmp1 = sdiv i32 %a, %b ; <i32> [#uses=1] + %tmp1 = sdiv nof i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } @@ -38,7 +38,7 @@ ; CHECK-HWDIV: udiv ; CHECK-EABI: __aeabi_uidiv - %tmp1 = udiv i32 %a, %b ; <i32> [#uses=1] + %tmp1 = udiv nof i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } Index: test/CodeGen/ARM/divmod-eabi.ll =================================================================== --- test/CodeGen/ARM/divmod-eabi.ll +++ test/CodeGen/ARM/divmod-eabi.ll @@ -29,7 +29,7 @@ entry: %conv = sext i16 %a to i32 %conv1 = sext i16 %b to i32 - %div = sdiv i32 %conv, %conv1 + %div = sdiv nof i32 %conv, %conv1 %rem = srem i32 %conv, %conv1 ; EABI: __aeabi_idivmod ; EABI: mov [[div:r[0-9]+]], r0 @@ -64,7 +64,7 @@ ; DARWIN-O0-LABEL: f32: ; WINDOWS-LABEL: f32: entry: - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b %rem = srem i32 %a, %b ; EABI: __aeabi_idivmod ; EABI: mov 
[[div:r[0-9]+]], r0 @@ -96,7 +96,7 @@ ; DARWIN-O0-LABEL: uf: ; WINDOWS-LABEL: uf: entry: - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b %rem = urem i32 %a, %b ; EABI: __aeabi_uidivmod ; DARWIN: __udivmodsi4 @@ -126,7 +126,7 @@ ; DARWIN-O0-LABEL: longf: ; WINDOWS-LABEL: longf: entry: - %div = sdiv i64 %a, %b + %div = sdiv nof i64 %a, %b %rem = srem i64 %a, %b ; EABI: __aeabi_ldivmod ; EABI-NEXT: adds r0 @@ -153,7 +153,7 @@ ; DARWIN-O0-LABEL: shortf: ; WINDOWS-LABEL: shortf: entry: - %div = sdiv i16 %a, %b + %div = sdiv nof i16 %a, %b %rem = srem i16 %a, %b ; EABI: __aeabi_idivmod ; DARWIN: ___divmodsi4 @@ -173,7 +173,7 @@ ; DARWIN-O0-LABEL: g1: ; WINDOWS-LABEL: g1: entry: - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b %rem = srem i32 %a, %b ; EABI: __aeabi_idivmod ; DARWIN: ___divmodsi4 @@ -237,7 +237,7 @@ ; DARWIN-O0-LABEL: g4: ; WINDOWS-LABEL: g4: entry: - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b ; EABI: __aeabi_idiv{{$}} ; EABI: mov [[div:r[0-9]+]], r0 ; DARWIN: ___divsi3 Index: test/CodeGen/ARM/divmod.ll =================================================================== --- test/CodeGen/ARM/divmod.ll +++ test/CodeGen/ARM/divmod.ll @@ -14,7 +14,7 @@ ; SWIFT: sdiv ; SWIFT: mls ; SWIFT-NOT: bl __divmodsi4 - %div = sdiv i32 %x, %y + %div = sdiv nof i32 %x, %y store i32 %div, i32* %P, align 4 %rem = srem i32 %x, %y %arrayidx6 = getelementptr inbounds i32, i32* %P, i32 1 @@ -32,7 +32,7 @@ ; SWIFT: udiv ; SWIFT: mls ; SWIFT-NOT: bl __udivmodsi4 - %div = udiv i32 %x, %y + %div = udiv nof i32 %x, %y store i32 %div, i32* %P, align 4 %rem = urem i32 %x, %y %arrayidx6 = getelementptr inbounds i32, i32* %P, i32 1 @@ -60,7 +60,7 @@ ; SWIFT-NOT: bl __divmodsi4 %3 = load i32, i32* @tabsize, align 4 %4 = srem i32 %cols, %3 - %5 = sdiv i32 %cols, %3 + %5 = sdiv nof i32 %cols, %3 %6 = tail call i32 @llvm.objectsize.i32.p0i8(i8* null, i1 false) %7 = tail call i8* @__memset_chk(i8* null, i32 9, i32 %5, i32 %6) nounwind br label %bb1 @@ -87,7 +87,7 @@ ; 
SWIFT: mls ; SWIFT-NOT: bl __udivmodsi4 %rem = urem i32 %x, %y - %div = udiv i32 %x, %y + %div = udiv nof i32 %x, %y %not.cmp = icmp ne i32 %rem, 0 %add = zext i1 %not.cmp to i32 %cond = add i32 %add, %div Index: test/CodeGen/ARM/fast-isel-call.ll =================================================================== --- test/CodeGen/ARM/fast-isel-call.ll +++ test/CodeGen/ARM/fast-isel-call.ll @@ -188,7 +188,7 @@ ; THUMB-LONG: {{(movt r2, :upper16:L___udivsi3\$non_lazy_ptr)?}} ; THUMB-LONG: ldr r2, [r2] ; THUMB-LONG: blx r2 - %tmp1 = udiv i32 %a, %b ; <i32> [#uses=1] + %tmp1 = udiv nof i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } Index: test/CodeGen/ARM/jumptable-label.ll =================================================================== --- test/CodeGen/ARM/jumptable-label.ll +++ test/CodeGen/ARM/jumptable-label.ll @@ -24,7 +24,7 @@ br label %return sw.bb20: ; preds = %entry - %div = sdiv i32 undef, undef + %div = sdiv nof i32 undef, undef br label %return return: ; preds = %sw.bb20, %sw.bb13, %sw.bb6, %sw.bb, %entry Index: test/CodeGen/ARM/krait-cpu-div-attribute.ll =================================================================== --- test/CodeGen/ARM/krait-cpu-div-attribute.ll +++ test/CodeGen/ARM/krait-cpu-div-attribute.ll @@ -30,7 +30,7 @@ store volatile i32 32, i32* %c, align 4 %0 = load volatile i32, i32* %b, align 4 %1 = load volatile i32, i32* %c, align 4 - %div = sdiv i32 %0, %1 + %div = sdiv nof i32 %0, %1 store volatile i32 %div, i32* %a, align 4 ret i32 0 } Index: test/CodeGen/ARM/local-call.ll =================================================================== --- test/CodeGen/ARM/local-call.ll +++ test/CodeGen/ARM/local-call.ll @@ -15,6 +15,6 @@ ; CHECK-LABEL: test_local_call: ; CHECK: bl ___udivdi3 -%res = udiv i64 %a, %b +%res = udiv nof i64 %a, %b ret i64 %res } Index: test/CodeGen/ARM/mulhi.ll =================================================================== --- test/CodeGen/ARM/mulhi.ll +++ test/CodeGen/ARM/mulhi.ll @@ -49,6 +49,6 @@ ; M3: 
smull entry: %tmp1 = mul nsw i32 %a, 3 - %tmp2 = sdiv i32 %tmp1, 23 + %tmp2 = sdiv nof i32 %tmp1, 23 ret i32 %tmp2 } Index: test/CodeGen/ARM/neon_div.ll =================================================================== --- test/CodeGen/ARM/neon_div.ll +++ test/CodeGen/ARM/neon_div.ll @@ -4,7 +4,7 @@ define <8 x i8> @sdivi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load <8 x i8>, <8 x i8>* %B - %tmp3 = sdiv <8 x i8> %tmp1, %tmp2 + %tmp3 = sdiv nof <8 x i8> %tmp1, %tmp2 ret <8 x i8> %tmp3 } @@ -18,7 +18,7 @@ define <8 x i8> @udivi8(<8 x i8>* %A, <8 x i8>* %B) nounwind { %tmp1 = load <8 x i8>, <8 x i8>* %A %tmp2 = load <8 x i8>, <8 x i8>* %B - %tmp3 = udiv <8 x i8> %tmp1, %tmp2 + %tmp3 = udiv nof <8 x i8> %tmp1, %tmp2 ret <8 x i8> %tmp3 } @@ -34,7 +34,7 @@ define <4 x i16> @sdivi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = load <4 x i16>, <4 x i16>* %B - %tmp3 = sdiv <4 x i16> %tmp1, %tmp2 + %tmp3 = sdiv nof <4 x i16> %tmp1, %tmp2 ret <4 x i16> %tmp3 } @@ -46,7 +46,7 @@ define <4 x i16> @udivi16(<4 x i16>* %A, <4 x i16>* %B) nounwind { %tmp1 = load <4 x i16>, <4 x i16>* %A %tmp2 = load <4 x i16>, <4 x i16>* %B - %tmp3 = udiv <4 x i16> %tmp1, %tmp2 + %tmp3 = udiv nof <4 x i16> %tmp1, %tmp2 ret <4 x i16> %tmp3 } Index: test/CodeGen/ARM/rem_crash.ll =================================================================== --- test/CodeGen/ARM/rem_crash.ll +++ test/CodeGen/ARM/rem_crash.ll @@ -2,7 +2,7 @@ define i8 @test_minsize_uu8(i8 %x) minsize optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -10,7 +10,7 @@ define i8 @test_minsize_ss8(i8 %x) minsize optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -18,7 +18,7 @@ define i8 @test_minsize_us8(i8 %x) minsize optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = 
add i8 %0, %1 ret i8 %res @@ -26,7 +26,7 @@ define i8 @test_minsize_su8(i8 %x) minsize optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -34,7 +34,7 @@ define i16 @test_minsize_uu16(i16 %x) minsize optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -42,7 +42,7 @@ define i16 @test_minsize_ss16(i16 %x) minsize optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -50,7 +50,7 @@ define i16 @test_minsize_us16(i16 %x) minsize optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -58,7 +58,7 @@ define i16 @test_minsize_su16(i16 %x) minsize optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -66,7 +66,7 @@ define i32 @test_minsize_uu32(i32 %x) minsize optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -74,7 +74,7 @@ define i32 @test_minsize_ss32(i32 %x) minsize optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -82,7 +82,7 @@ define i32 @test_minsize_us32(i32 %x) minsize optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -90,7 +90,7 @@ define i32 @test_minsize_su32(i32 %x) minsize optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -98,7 +98,7 @@ define i64 @test_minsize_uu64(i64 %x) minsize optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -106,7 +106,7 @@ define i64 @test_minsize_ss64(i64 %x) minsize optsize { entry: - %0 = sdiv i64 %x, 10 + %0 
= sdiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -114,7 +114,7 @@ define i64 @test_minsize_us64(i64 %x) minsize optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -122,7 +122,7 @@ define i64 @test_minsize_su64(i64 %x) minsize optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -130,7 +130,7 @@ define i8 @test_uu8(i8 %x) optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -138,7 +138,7 @@ define i8 @test_ss8(i8 %x) optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -146,7 +146,7 @@ define i8 @test_us8(i8 %x) optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -154,7 +154,7 @@ define i8 @test_su8(i8 %x) optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -162,7 +162,7 @@ define i16 @test_uu16(i16 %x) optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -170,7 +170,7 @@ define i16 @test_ss16(i16 %x) optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -178,7 +178,7 @@ define i16 @test_us16(i16 %x) optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -186,7 +186,7 @@ define i16 @test_su16(i16 %x) optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -194,7 +194,7 @@ define i32 @test_uu32(i32 %x) optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -202,7 +202,7 
@@ define i32 @test_ss32(i32 %x) optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -210,7 +210,7 @@ define i32 @test_us32(i32 %x) optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -218,7 +218,7 @@ define i32 @test_su32(i32 %x) optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -226,7 +226,7 @@ define i64 @test_uu64(i64 %x) optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -234,7 +234,7 @@ define i64 @test_ss64(i64 %x) optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -242,7 +242,7 @@ define i64 @test_us64(i64 %x) optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -250,7 +250,7 @@ define i64 @test_su64(i64 %x) optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res Index: test/CodeGen/ARM/select-imm.ll =================================================================== --- test/CodeGen/ARM/select-imm.ll +++ test/CodeGen/ARM/select-imm.ll @@ -276,7 +276,7 @@ store i32 -8, i32* %p %0 = load i32, i32* %q %1 = load i32, i32* %p - %div = sdiv i32 %0, %1 + %div = sdiv nof i32 %0, %1 %mul = mul nsw i32 %div, %1 %rem = srem i32 %0, %1 %add = add nsw i32 %mul, %rem Index: test/CodeGen/ARM/thumb1-div.ll =================================================================== --- test/CodeGen/ARM/thumb1-div.ll +++ test/CodeGen/ARM/thumb1-div.ll @@ -6,7 +6,7 @@ ; CHECK-LABEL: f1 ; CHECK: sdiv - %tmp1 = sdiv i32 %a, %b ; <i32> [#uses=1] + %tmp1 = sdiv nof i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } @@ -14,7 +14,7 @@ entry: ; CHECK-LABEL: f2 ; CHECK: udiv - %tmp1 = udiv 
i32 %a, %b ; <i32> [#uses=1] + %tmp1 = udiv nof i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } Index: test/CodeGen/ARM/thumb2-size-reduction-internal-flags.ll =================================================================== --- test/CodeGen/ARM/thumb2-size-reduction-internal-flags.ll +++ test/CodeGen/ARM/thumb2-size-reduction-internal-flags.ll @@ -17,7 +17,7 @@ entry: %0 = load i32, i32* @reg_len, align 4, !tbaa !3 %sub = add nsw i32 %0, -1 - %div = sdiv i32 %sub, 31 + %div = sdiv nof i32 %sub, 31 %rem2 = srem i32 %sub, 31 %cmp35202 = icmp sgt i32 %rem2, 0 br label %for.cond3.preheader Index: test/CodeGen/ARM/urem-opt-size.ll =================================================================== --- test/CodeGen/ARM/urem-opt-size.ll +++ test/CodeGen/ARM/urem-opt-size.ll @@ -19,7 +19,7 @@ ; CHECK:__aeabi_idiv ; CHECK-NOT: smmul %call = tail call i32 bitcast (i32 (...)* @GetValue to i32 ()*)() - %div = sdiv i32 %call, 1000000 + %div = sdiv nof i32 %call, 1000000 ret i32 %div } @@ -29,7 +29,7 @@ ; CHECK: __aeabi_uidiv ; CHECK-NOT: umull %call = tail call i32 bitcast (i32 (...)* @GetValue to i32 ()*)() - %div = udiv i32 %call, 1000000 + %div = udiv nof i32 %call, 1000000 ret i32 %div } @@ -77,7 +77,7 @@ ; V7M: mls {{r[0-9]+}}, [[R2]], [[R1]], [[R0]] ; V7M-NOT: __aeabi_idivmod %call = tail call i32 bitcast (i32 (...)* @GetValue to i32 ()*)() - %div = sdiv i32 %call, 1000000 + %div = sdiv nof i32 %call, 1000000 %rem = srem i32 %call, 1000000 %add = add i32 %div, %rem ret i32 %add @@ -95,7 +95,7 @@ %temp.0 = sext i32 %bar to i64 %mul83 = shl i64 %temp.0, 1 %add84 = add i64 %temp.0, 2 - %div85 = udiv i64 %mul83, %add84 + %div85 = udiv nof i64 %mul83, %add84 ret i64 %div85 } @@ -107,7 +107,7 @@ ; CHECK-NOT: __aeabi_ %mul83 = shl i16 %bar, 1 %add84 = add i16 %bar, 2 - %div85 = udiv i16 %mul83, %add84 + %div85 = udiv nof i16 %mul83, %add84 ret i16 %div85 } declare i32 @GetValue(...) 
local_unnamed_addr Index: test/CodeGen/ARM/vector-extend-narrow.ll =================================================================== --- test/CodeGen/ARM/vector-extend-narrow.ll +++ test/CodeGen/ARM/vector-extend-narrow.ll @@ -59,7 +59,7 @@ ; CHECK: vmul ; CHECK: vmovn %1 = load <4 x i8>, <4 x i8>* %x, align 4 - %2 = sdiv <4 x i8> zeroinitializer, %1 + %2 = sdiv nof <4 x i8> zeroinitializer, %1 ret <4 x i8> %2 } ; CHECK-LABEL: j: Index: test/CodeGen/ARM/vector-promotion.ll =================================================================== --- test/CodeGen/ARM/vector-promotion.ll +++ test/CodeGen/ARM/vector-promotion.ll @@ -125,9 +125,9 @@ ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1 ; Scalar version: ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 1 -; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = udiv i32 [[EXTRACT]], 7 +; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = udiv nof i32 [[EXTRACT]], 7 ; Vector version: -; IR-STRESS-NEXT: [[DIV:%[a-zA-Z_0-9-]+]] = udiv <2 x i32> [[LOAD]], <i32 7, i32 7> +; IR-STRESS-NEXT: [[DIV:%[a-zA-Z_0-9-]+]] = udiv nof <2 x i32> [[LOAD]], <i32 7, i32 7> ; IR-STRESS-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[DIV]], i32 1 ; ; IR-BOTH-NEXT: store i32 [[RES]], i32* %dest @@ -135,7 +135,7 @@ define void @udivCase(<2 x i32>* %addr1, i32* %dest) { %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8 %extract = extractelement <2 x i32> %in1, i32 1 - %out = udiv i32 %extract, 7 + %out = udiv nof i32 %extract, 7 store i32 %out, i32* %dest, align 4 ret void } @@ -163,9 +163,9 @@ ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1 ; Scalar version: ; IR-NORMAL-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 1 -; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = sdiv i32 [[EXTRACT]], 7 +; IR-NORMAL-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = sdiv nof i32 [[EXTRACT]], 7 ; Vector version: -; IR-STRESS-NEXT: [[DIV:%[a-zA-Z_0-9-]+]] = sdiv <2 x i32> 
[[LOAD]], <i32 7, i32 7> +; IR-STRESS-NEXT: [[DIV:%[a-zA-Z_0-9-]+]] = sdiv nof <2 x i32> [[LOAD]], <i32 7, i32 7> ; IR-STRESS-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[DIV]], i32 1 ; ; IR-BOTH-NEXT: store i32 [[RES]], i32* %dest @@ -173,7 +173,7 @@ define void @sdivCase(<2 x i32>* %addr1, i32* %dest) { %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8 %extract = extractelement <2 x i32> %in1, i32 1 - %out = sdiv i32 %extract, 7 + %out = sdiv nof i32 %extract, 7 store i32 %out, i32* %dest, align 4 ret void } @@ -240,13 +240,13 @@ ; IR-BOTH-LABEL: @undefDivCase ; IR-BOTH: [[LOAD:%[a-zA-Z_0-9-]+]] = load <2 x i32>, <2 x i32>* %addr1 ; IR-BOTH-NEXT: [[EXTRACT:%[a-zA-Z_0-9-]+]] = extractelement <2 x i32> [[LOAD]], i32 1 -; IR-BOTH-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = udiv i32 7, [[EXTRACT]] +; IR-BOTH-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = udiv nof i32 7, [[EXTRACT]] ; IR-BOTH-NEXT: store i32 [[RES]], i32* %dest ; IR-BOTH-NEXT: ret define void @undefDivCase(<2 x i32>* %addr1, i32* %dest) { %in1 = load <2 x i32>, <2 x i32>* %addr1, align 8 %extract = extractelement <2 x i32> %in1, i32 1 - %out = udiv i32 7, %extract + %out = udiv nof i32 7, %extract store i32 %out, i32* %dest, align 4 ret void } Index: test/CodeGen/AVR/div.ll =================================================================== --- test/CodeGen/AVR/div.ll +++ test/CodeGen/AVR/div.ll @@ -6,7 +6,7 @@ ; CHECK: call __udivmodqi4 ; CHECK-NEXT: ret - %quotient = udiv i8 %a, %b + %quotient = udiv nof i8 %a, %b ret i8 %quotient } @@ -16,7 +16,7 @@ ; CHECK: call __divmodqi4 ; CHECK-NEXT: ret - %quotient = sdiv i8 %a, %b + %quotient = sdiv nof i8 %a, %b ret i8 %quotient } @@ -26,7 +26,7 @@ ; CHECK: call __udivmodhi4 ; CHECK-NEXT: movw r24, r22 ; CHECK-NEXT: ret - %quot = udiv i16 %a, %b + %quot = udiv nof i16 %a, %b ret i16 %quot } @@ -36,7 +36,7 @@ ; CHECK: call __divmodhi4 ; CHECK-NEXT: movw r24, r22 ; CHECK-NEXT: ret - %quot = sdiv i16 %a, %b + %quot = sdiv nof i16 %a, %b ret i16 %quot } @@ -47,7 +47,7 @@ ; 
CHECK-NEXT: movw r22, r18 ; CHECK-NEXT: movw r24, r20 ; CHECK-NEXT: ret - %quot = udiv i32 %a, %b + %quot = udiv nof i32 %a, %b ret i32 %quot } @@ -58,7 +58,7 @@ ; CHECK-NEXT: movw r22, r18 ; CHECK-NEXT: movw r24, r20 ; CHECK-NEXT: ret - %quot = sdiv i32 %a, %b + %quot = sdiv nof i32 %a, %b ret i32 %quot } Index: test/CodeGen/BPF/sanity.ll =================================================================== --- test/CodeGen/BPF/sanity.ll +++ test/CodeGen/BPF/sanity.ll @@ -75,7 +75,7 @@ ; <label>:5 ; preds = %0 %6 = trunc i64 %d to i32 - %7 = udiv i32 %6, %c + %7 = udiv nof i32 %6, %c br label %8 ; <label>:8 ; preds = %5, %2 Index: test/CodeGen/Generic/2006-02-12-InsertLibcall.ll =================================================================== --- test/CodeGen/Generic/2006-02-12-InsertLibcall.ll +++ test/CodeGen/Generic/2006-02-12-InsertLibcall.ll @@ -7,7 +7,7 @@ br i1 %tmp.9, label %endif.0, label %shortcirc_next.0 then.5.i: ; preds = %shortcirc_next.i - %tmp.114.i = sdiv i64 %tmp.2i, 3 ; <i64> [#uses=1] + %tmp.114.i = sdiv nof i64 %tmp.2i, 3 ; <i64> [#uses=1] %tmp.111.i = call i64 @lseek( i32 0, i64 %tmp.114.i, i32 1 ) ; <i64> [#uses=0] ret void Index: test/CodeGen/Generic/2006-08-30-CoalescerCrash.ll =================================================================== --- test/CodeGen/Generic/2006-08-30-CoalescerCrash.ll +++ test/CodeGen/Generic/2006-08-30-CoalescerCrash.ll @@ -83,7 +83,7 @@ cond_next41: ; preds = %cond_true30 %tmp44 = call i32 @force_var_cost( %struct.ivopts_data* %data, %struct.tree_node* null, %struct.bitmap_head_def** null ) ; <i32> [#uses=2] - %tmp46 = udiv i32 %tmp44, 5 ; <i32> [#uses=1] + %tmp46 = udiv nof i32 %tmp44, 5 ; <i32> [#uses=1] call void @set_use_iv_cost( %struct.ivopts_data* %data, %struct.iv_use* %use, %struct.iv_cand* %cand, i32 %tmp46, %struct.bitmap_head_def* null ) %tmp44.off = add i32 %tmp44, -50000000 ; <i32> [#uses=1] %tmp52 = icmp ugt i32 %tmp44.off, 4 ; <i1> [#uses=1] Index: test/CodeGen/Generic/BasicInstrs.ll 
=================================================================== --- test/CodeGen/Generic/BasicInstrs.ll +++ test/CodeGen/Generic/BasicInstrs.ll @@ -19,12 +19,12 @@ } define i32 @sdiv(i32 %A, i32 %B) { - %R = sdiv i32 %A, %B ; <i32> [#uses=1] + %R = sdiv nof i32 %A, %B ; <i32> [#uses=1] ret i32 %R } define i32 @udiv(i32 %A, i32 %B) { - %R = udiv i32 %A, %B ; <i32> [#uses=1] + %R = udiv nof i32 %A, %B ; <i32> [#uses=1] ret i32 %R } Index: test/CodeGen/Generic/div-neg-power-2.ll =================================================================== --- test/CodeGen/Generic/div-neg-power-2.ll +++ test/CodeGen/Generic/div-neg-power-2.ll @@ -1,7 +1,7 @@ ; RUN: llc < %s define i32 @test(i32 %X) { - %Y = sdiv i32 %X, -2 ; <i32> [#uses=1] + %Y = sdiv nof i32 %X, -2 ; <i32> [#uses=1] ret i32 %Y } Index: test/CodeGen/Generic/print-arith-int.ll =================================================================== --- test/CodeGen/Generic/print-arith-int.ll +++ test/CodeGen/Generic/print-arith-int.ll @@ -32,7 +32,7 @@ %add_r = add i32 %a, %b ; <i32> [#uses=1] %sub_r = sub i32 %a, %b ; <i32> [#uses=1] %mul_r = mul i32 %a, %b ; <i32> [#uses=1] - %div_r = sdiv i32 %b, %a ; <i32> [#uses=1] + %div_r = sdiv nof i32 %b, %a ; <i32> [#uses=1] %rem_r = srem i32 %b, %a ; <i32> [#uses=1] %add_s = getelementptr [12 x i8], [12 x i8]* @add_str, i64 0, i64 0 ; <i8*> [#uses=1] %sub_s = getelementptr [12 x i8], [12 x i8]* @sub_str, i64 0, i64 0 ; <i8*> [#uses=1] Index: test/CodeGen/Hexagon/expand-condsets-rm-segment.ll =================================================================== --- test/CodeGen/Hexagon/expand-condsets-rm-segment.ll +++ test/CodeGen/Hexagon/expand-condsets-rm-segment.ll @@ -72,7 +72,7 @@ br label %if.end22 if.else19: ; preds = %if.then5 - %div = udiv i32 -1, %1 + %div = udiv nof i32 -1, %1 store i32 %div, i32* %inv_weight, align 4, !tbaa !6 br label %if.end22 Index: test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll 
=================================================================== --- test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll +++ test/CodeGen/Hexagon/loop-idiom/pmpy-infinite-loop.ll @@ -41,7 +41,7 @@ %add344 = add nsw i32 %add335, %3 %add353 = add i32 %add344, %5 %add362 = add i32 %add353, %6 - %div363 = sdiv i32 %add362, 6 + %div363 = sdiv nof i32 %add362, 6 %conv364 = trunc i32 %div363 to i16 %sext268 = shl i32 %div363, 16 %conv369 = ashr exact i32 %sext268, 16 Index: test/CodeGen/Hexagon/macint.ll =================================================================== --- test/CodeGen/Hexagon/macint.ll +++ test/CodeGen/Hexagon/macint.ll @@ -6,7 +6,7 @@ define i32 @main(i32* %a, i32* %b) nounwind { entry: %0 = load i32, i32* %a, align 4 - %div = udiv i32 %0, 10000 + %div = udiv nof i32 %0, 10000 %rem = urem i32 %div, 10 store i32 %rem, i32* %b, align 4 ret i32 0 Index: test/CodeGen/Hexagon/multi-cycle.ll =================================================================== --- test/CodeGen/Hexagon/multi-cycle.ll +++ test/CodeGen/Hexagon/multi-cycle.ll @@ -25,7 +25,7 @@ %v13 = load <16 x i32>, <16 x i32>* %v10, align 64, !tbaa !1 %v14 = load <16 x i32>, <16 x i32>* @ZERO, align 64, !tbaa !1 %v15 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %v14, <16 x i32> %v14) - %v16 = sdiv i32 %a2, 32 + %v16 = sdiv nof i32 %a2, 32 %v17 = icmp sgt i32 %a2, 31 br i1 %v17, label %b18, label %b66 Index: test/CodeGen/Hexagon/swp-max.ll =================================================================== --- test/CodeGen/Hexagon/swp-max.ll +++ test/CodeGen/Hexagon/swp-max.ll @@ -6,7 +6,7 @@ define i32 @test(i32 %Left, i32 %Right) { entry: %add = add nsw i32 %Right, %Left - %div = sdiv i32 %add, 2 + %div = sdiv nof i32 %add, 2 %cmp9 = icmp slt i32 %div, %Left br i1 %cmp9, label %for.end, label %for.body.preheader Index: test/CodeGen/Lanai/i32.ll =================================================================== --- test/CodeGen/Lanai/i32.ll +++ test/CodeGen/Lanai/i32.ll @@ 
-38,14 +38,14 @@ ; CHECK-LABEL: sdiv32: ; CHECK: bt __divsi3 define i32 @sdiv32(i32 %x, i32 %y) { - %a = sdiv i32 %x, %y + %a = sdiv nof i32 %x, %y ret i32 %a } ; CHECK-LABEL: udiv32: ; CHECK: bt __udivsi3 define i32 @udiv32(i32 %x, i32 %y) { - %a = udiv i32 %x, %y + %a = udiv nof i32 %x, %y ret i32 %a } Index: test/CodeGen/MIR/X86/dynamic-regmask.ll =================================================================== --- test/CodeGen/MIR/X86/dynamic-regmask.ll +++ test/CodeGen/MIR/X86/dynamic-regmask.ll @@ -16,7 +16,7 @@ define x86_regcallcc {i32, i32, i32} @test_callee(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0) nounwind { %b1 = mul i32 7, %e0 - %b2 = udiv i32 5, %e0 + %b2 = udiv nof i32 5, %e0 %b3 = mul i32 7, %d0 %b4 = insertvalue {i32, i32, i32} undef, i32 %b1, 0 %b5 = insertvalue {i32, i32, i32} %b4, i32 %b2, 1 Index: test/CodeGen/MSP430/2009-11-20-NewNode.ll =================================================================== --- test/CodeGen/MSP430/2009-11-20-NewNode.ll +++ test/CodeGen/MSP430/2009-11-20-NewNode.ll @@ -11,7 +11,7 @@ if.end27: ; preds = %if.then, %do.end %cond66 = select i1 undef, i64 -9223372036854775808, i64 9223372036854775807 ; <i64> [#uses=3] %conv69 = sext i16 %base to i64 ; <i64> [#uses=1] - %div = udiv i64 %cond66, %conv69 ; <i64> [#uses=1] + %div = udiv nof i64 %cond66, %conv69 ; <i64> [#uses=1] br label %for.cond for.cond: ; preds = %if.end116, %if.end27 Index: test/CodeGen/MSP430/libcalls.ll =================================================================== --- test/CodeGen/MSP430/libcalls.ll +++ test/CodeGen/MSP430/libcalls.ll @@ -433,7 +433,7 @@ ; CHECK: call #__mspabi_divi %0 = load volatile i16, i16* @g_i16, align 8 - %1 = sdiv i16 %0, %0 + %1 = sdiv nof i16 %0, %0 ret i16 %1 } @@ -444,7 +444,7 @@ ; CHECK: call #__mspabi_divli %0 = load volatile i32, i32* @g_i32, align 8 - %1 = sdiv i32 %0, %0 + %1 = sdiv nof i32 %0, %0 ret i32 %1 } @@ -455,7 +455,7 @@ ; CHECK: call #__mspabi_divlli %0 = load volatile i64, i64* @g_i64, align 8 
- %1 = sdiv i64 %0, %0 + %1 = sdiv nof i64 %0, %0 ret i64 %1 } @@ -466,7 +466,7 @@ ; CHECK: call #__mspabi_divu %0 = load volatile i16, i16* @g_i16, align 8 - %1 = udiv i16 %0, %0 + %1 = udiv nof i16 %0, %0 ret i16 %1 } @@ -477,7 +477,7 @@ ; CHECK: call #__mspabi_divul %0 = load volatile i32, i32* @g_i32, align 8 - %1 = udiv i32 %0, %0 + %1 = udiv nof i32 %0, %0 ret i32 %1 } @@ -488,7 +488,7 @@ ; CHECK: call #__mspabi_divull %0 = load volatile i64, i64* @g_i64, align 8 - %1 = udiv i64 %0, %0 + %1 = udiv nof i64 %0, %0 ret i64 %1 } Index: test/CodeGen/Mips/Fast-ISel/div1.ll =================================================================== --- test/CodeGen/Mips/Fast-ISel/div1.ll +++ test/CodeGen/Mips/Fast-ISel/div1.ll @@ -27,7 +27,7 @@ ; CHECK: sw $[[RESULT]], 0($[[I_ADDR]]) %1 = load i32, i32* @sj, align 4 %2 = load i32, i32* @sk, align 4 - %div = sdiv i32 %1, %2 + %div = sdiv nof i32 %1, %2 store i32 %div, i32* @si, align 4 ret void } @@ -49,7 +49,7 @@ ; CHECK: sw $[[RESULT]], 0($[[I_ADDR]]) %1 = load i32, i32* @uj, align 4 %2 = load i32, i32* @uk, align 4 - %div = udiv i32 %1, %2 + %div = udiv nof i32 %1, %2 store i32 %div, i32* @ui, align 4 ret void } Index: test/CodeGen/Mips/assertzext-trunc.ll =================================================================== --- test/CodeGen/Mips/assertzext-trunc.ll +++ test/CodeGen/Mips/assertzext-trunc.ll @@ -28,7 +28,7 @@ ; R6: divu $2, $4, $5 ; R6: teq $5, $zero, 7 - %r = udiv i8 %a, %b + %r = udiv nof i8 %a, %b ret i8 %r } Index: test/CodeGen/Mips/div.ll =================================================================== --- test/CodeGen/Mips/div.ll +++ test/CodeGen/Mips/div.ll @@ -8,7 +8,7 @@ entry: %0 = load i32, i32* @iiii, align 4 %1 = load i32, i32* @jjjj, align 4 - %div = sdiv i32 %0, %1 + %div = sdiv nof i32 %0, %1 ; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}} ; 16: mflo ${{[0-9]+}} store i32 %div, i32* @kkkk, align 4 Index: test/CodeGen/Mips/div_rem.ll 
=================================================================== --- test/CodeGen/Mips/div_rem.ll +++ test/CodeGen/Mips/div_rem.ll @@ -9,7 +9,7 @@ entry: %0 = load i32, i32* @iiii, align 4 %1 = load i32, i32* @jjjj, align 4 - %div = sdiv i32 %0, %1 + %div = sdiv nof i32 %0, %1 store i32 %div, i32* @kkkk, align 4 %rem = srem i32 %0, %1 ; 16: div $zero, ${{[0-9]+}}, ${{[0-9]+}} Index: test/CodeGen/Mips/divrem.ll =================================================================== --- test/CodeGen/Mips/divrem.ll +++ test/CodeGen/Mips/divrem.ll @@ -50,7 +50,7 @@ ; ALL: .end sdiv1 - %div = sdiv i32 %a0, %a1 + %div = sdiv nof i32 %a0, %a1 ret i32 %div } @@ -103,7 +103,7 @@ ; ACC64: mflo $2 ; ALL: .end udiv1 - %div = udiv i32 %a0, %a1 + %div = udiv nof i32 %a0, %a1 ret i32 %div } @@ -171,7 +171,7 @@ %rem = srem i32 %a0, %a1 store i32 %rem, i32* %r, align 4 - %div = sdiv i32 %a0, %a1 + %div = sdiv nof i32 %a0, %a1 ret i32 %div } @@ -213,7 +213,7 @@ %rem = urem i32 %a0, %a1 store i32 %rem, i32* %r, align 4 - %div = udiv i32 %a0, %a1 + %div = udiv nof i32 %a0, %a1 ret i32 %div } @@ -222,7 +222,7 @@ entry: %0 = load i32, i32* @g0, align 4 %1 = load i32, i32* @g1, align 4 - %div = sdiv i32 %0, %1 + %div = sdiv nof i32 %0, %1 ret i32 %div } @@ -245,7 +245,7 @@ ; ALL: .end sdiv2 - %div = sdiv i64 %a0, %a1 + %div = sdiv nof i64 %a0, %a1 ret i64 %div } @@ -290,7 +290,7 @@ ; ACC64: mflo $2 ; ALL: .end udiv2 - %div = udiv i64 %a0, %a1 + %div = udiv nof i64 %a0, %a1 ret i64 %div } @@ -348,7 +348,7 @@ %rem = srem i64 %a0, %a1 store i64 %rem, i64* %r, align 8 - %div = sdiv i64 %a0, %a1 + %div = sdiv nof i64 %a0, %a1 ret i64 %div } @@ -383,6 +383,6 @@ %rem = urem i64 %a0, %a1 store i64 %rem, i64* %r, align 8 - %div = udiv i64 %a0, %a1 + %div = udiv nof i64 %a0, %a1 ret i64 %div } Index: test/CodeGen/Mips/divu.ll =================================================================== --- test/CodeGen/Mips/divu.ll +++ test/CodeGen/Mips/divu.ll @@ -8,7 +8,7 @@ entry: %0 = load i32, i32* 
@iiii, align 4 %1 = load i32, i32* @jjjj, align 4 - %div = udiv i32 %0, %1 + %div = udiv nof i32 %0, %1 ; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}} ; 16: mflo ${{[0-9]+}} store i32 %div, i32* @kkkk, align 4 Index: test/CodeGen/Mips/divu_remu.ll =================================================================== --- test/CodeGen/Mips/divu_remu.ll +++ test/CodeGen/Mips/divu_remu.ll @@ -10,7 +10,7 @@ entry: %0 = load i32, i32* @iiii, align 4 %1 = load i32, i32* @jjjj, align 4 - %div = udiv i32 %0, %1 + %div = udiv nof i32 %0, %1 store i32 %div, i32* @kkkk, align 4 %rem = urem i32 %0, %1 ; 16: divu $zero, ${{[0-9]+}}, ${{[0-9]+}} Index: test/CodeGen/Mips/llvm-ir/sdiv.ll =================================================================== --- test/CodeGen/Mips/llvm-ir/sdiv.ll +++ test/CodeGen/Mips/llvm-ir/sdiv.ll @@ -61,7 +61,7 @@ ; MMR6: li16 $[[T1:[0-9]+]], 0 ; MMR6: subu16 $2, $[[T1]], $[[T0]] - %r = sdiv i1 %a, %b + %r = sdiv nof i1 %a, %b ret i1 %r } @@ -96,7 +96,7 @@ ; MMR6: teq $5, $zero, 7 ; MMR6: seb $2, $[[T0]] - %r = sdiv i8 %a, %b + %r = sdiv nof i8 %a, %b ret i8 %r } @@ -131,7 +131,7 @@ ; MMR6: teq $5, $zero, 7 ; MMR6: seh $2, $[[T0]] - %r = sdiv i16 %a, %b + %r = sdiv nof i16 %a, %b ret i16 %r } @@ -153,7 +153,7 @@ ; MMR6: div $2, $4, $5 ; MMR6: teq $5, $zero, 7 - %r = sdiv i32 %a, %b + %r = sdiv nof i32 %a, %b ret i32 %r } @@ -172,7 +172,7 @@ ; MM32: lw $25, %call16(__divdi3)($2) - %r = sdiv i64 %a, %b + %r = sdiv nof i64 %a, %b ret i64 %r } @@ -187,6 +187,6 @@ ; MM32: lw $25, %call16(__divti3)($16) - %r = sdiv i128 %a, %b + %r = sdiv nof i128 %a, %b ret i128 %r } Index: test/CodeGen/Mips/llvm-ir/udiv.ll =================================================================== --- test/CodeGen/Mips/llvm-ir/udiv.ll +++ test/CodeGen/Mips/llvm-ir/udiv.ll @@ -49,7 +49,7 @@ ; MMR6: divu $2, $4, $5 ; MMR6: teq $5, $zero, 7 - %r = udiv i1 %a, %b + %r = udiv nof i1 %a, %b ret i1 %r } @@ -71,7 +71,7 @@ ; MMR6: divu $2, $4, $5 ; MMR6: teq $5, $zero, 7 - %r = udiv i8 %a, %b 
+ %r = udiv nof i8 %a, %b ret i8 %r } @@ -93,7 +93,7 @@ ; MMR6: divu $2, $4, $5 ; MMR6: teq $5, $zero, 7 - %r = udiv i16 %a, %b + %r = udiv nof i16 %a, %b ret i16 %r } @@ -115,7 +115,7 @@ ; MMR6: divu $2, $4, $5 ; MMR6: teq $5, $zero, 7 - %r = udiv i32 %a, %b + %r = udiv nof i32 %a, %b ret i32 %r } @@ -134,7 +134,7 @@ ; MM32: lw $25, %call16(__udivdi3)($2) - %r = udiv i64 %a, %b + %r = udiv nof i64 %a, %b ret i64 %r } @@ -149,6 +149,6 @@ ; MM32: lw $25, %call16(__udivti3)($16) - %r = udiv i128 %a, %b + %r = udiv nof i128 %a, %b ret i128 %r } Index: test/CodeGen/Mips/mips64instrs.ll =================================================================== --- test/CodeGen/Mips/mips64instrs.ll +++ test/CodeGen/Mips/mips64instrs.ll @@ -123,7 +123,7 @@ %0 = load i64, i64* @gll0, align 8 %1 = load i64, i64* @gll1, align 8 - %div = sdiv i64 %0, %1 + %div = sdiv nof i64 %0, %1 ret i64 %div } @@ -142,7 +142,7 @@ %0 = load i64, i64* @gll0, align 8 %1 = load i64, i64* @gll1, align 8 - %div = udiv i64 %0, %1 + %div = udiv nof i64 %0, %1 ret i64 %div } Index: test/CodeGen/Mips/mips64muldiv.ll =================================================================== --- test/CodeGen/Mips/mips64muldiv.ll +++ test/CodeGen/Mips/mips64muldiv.ll @@ -34,7 +34,7 @@ ; ALL: dsrl $2, $[[T1]], 63 ; ALL: daddu $2, $[[T1]], $2 - %div = sdiv i64 %a, 3 + %div = sdiv nof i64 %a, 3 ret i64 %div } @@ -44,7 +44,7 @@ ; ACC: ddivu $zero, $4, $5 ; ACC: mflo $2 ; GPR: ddivu $2, $4, $5 - %div = udiv i64 %a0, %a1 + %div = udiv nof i64 %a0, %a1 ret i64 %div } @@ -54,7 +54,7 @@ ; ACC: ddiv $zero, $4, $5 ; ACC: mflo $2 ; GPR: ddiv $2, $4, $5 - %div = sdiv i64 %a0, %a1 + %div = sdiv nof i64 %a0, %a1 ret i64 %div } Index: test/CodeGen/Mips/msa/3r-d.ll =================================================================== --- test/CodeGen/Mips/msa/3r-d.ll +++ test/CodeGen/Mips/msa/3r-d.ll @@ -97,7 +97,7 @@ entry: %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG1 %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_s_b_ARG2 
- %2 = sdiv <16 x i8> %0, %1 + %2 = sdiv nof <16 x i8> %0, %1 store <16 x i8> %2, <16 x i8>* @llvm_mips_div_s_b_RES ret void } @@ -113,7 +113,7 @@ entry: %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG1 %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_s_h_ARG2 - %2 = sdiv <8 x i16> %0, %1 + %2 = sdiv nof <8 x i16> %0, %1 store <8 x i16> %2, <8 x i16>* @llvm_mips_div_s_h_RES ret void } @@ -129,7 +129,7 @@ entry: %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG1 %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_s_w_ARG2 - %2 = sdiv <4 x i32> %0, %1 + %2 = sdiv nof <4 x i32> %0, %1 store <4 x i32> %2, <4 x i32>* @llvm_mips_div_s_w_RES ret void } @@ -145,7 +145,7 @@ entry: %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG1 %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_s_d_ARG2 - %2 = sdiv <2 x i64> %0, %1 + %2 = sdiv nof <2 x i64> %0, %1 store <2 x i64> %2, <2 x i64>* @llvm_mips_div_s_d_RES ret void } @@ -250,7 +250,7 @@ entry: %0 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG1 %1 = load <16 x i8>, <16 x i8>* @llvm_mips_div_u_b_ARG2 - %2 = udiv <16 x i8> %0, %1 + %2 = udiv nof <16 x i8> %0, %1 store <16 x i8> %2, <16 x i8>* @llvm_mips_div_u_b_RES ret void } @@ -266,7 +266,7 @@ entry: %0 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG1 %1 = load <8 x i16>, <8 x i16>* @llvm_mips_div_u_h_ARG2 - %2 = udiv <8 x i16> %0, %1 + %2 = udiv nof <8 x i16> %0, %1 store <8 x i16> %2, <8 x i16>* @llvm_mips_div_u_h_RES ret void } @@ -282,7 +282,7 @@ entry: %0 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG1 %1 = load <4 x i32>, <4 x i32>* @llvm_mips_div_u_w_ARG2 - %2 = udiv <4 x i32> %0, %1 + %2 = udiv nof <4 x i32> %0, %1 store <4 x i32> %2, <4 x i32>* @llvm_mips_div_u_w_RES ret void } @@ -298,7 +298,7 @@ entry: %0 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG1 %1 = load <2 x i64>, <2 x i64>* @llvm_mips_div_u_d_ARG2 - %2 = udiv <2 x i64> %0, %1 + %2 = udiv nof <2 x i64> %0, %1 store <2 x i64> %2, <2 x i64>* @llvm_mips_div_u_d_RES ret void } Index: 
test/CodeGen/Mips/msa/arithmetic.ll =================================================================== --- test/CodeGen/Mips/msa/arithmetic.ll +++ test/CodeGen/Mips/msa/arithmetic.ll @@ -476,7 +476,7 @@ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) - %3 = sdiv <16 x i8> %1, %2 + %3 = sdiv nof <16 x i8> %1, %2 ; CHECK-DAG: div_s.b [[R3:\$w[0-9]+]], [[R1]], [[R2]] store <16 x i8> %3, <16 x i8>* %c ; CHECK-DAG: st.b [[R3]], 0($4) @@ -492,7 +492,7 @@ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) - %3 = sdiv <8 x i16> %1, %2 + %3 = sdiv nof <8 x i16> %1, %2 ; CHECK-DAG: div_s.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] store <8 x i16> %3, <8 x i16>* %c ; CHECK-DAG: st.h [[R3]], 0($4) @@ -508,7 +508,7 @@ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) - %3 = sdiv <4 x i32> %1, %2 + %3 = sdiv nof <4 x i32> %1, %2 ; CHECK-DAG: div_s.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] store <4 x i32> %3, <4 x i32>* %c ; CHECK-DAG: st.w [[R3]], 0($4) @@ -524,7 +524,7 @@ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) - %3 = sdiv <2 x i64> %1, %2 + %3 = sdiv nof <2 x i64> %1, %2 ; CHECK-DAG: div_s.d [[R3:\$w[0-9]+]], [[R1]], [[R2]] store <2 x i64> %3, <2 x i64>* %c ; CHECK-DAG: st.d [[R3]], 0($4) @@ -540,7 +540,7 @@ ; CHECK-DAG: ld.b [[R1:\$w[0-9]+]], 0($5) %2 = load <16 x i8>, <16 x i8>* %b ; CHECK-DAG: ld.b [[R2:\$w[0-9]+]], 0($6) - %3 = udiv <16 x i8> %1, %2 + %3 = udiv nof <16 x i8> %1, %2 ; CHECK-DAG: div_u.b [[R3:\$w[0-9]+]], [[R1]], [[R2]] store <16 x i8> %3, <16 x i8>* %c ; CHECK-DAG: st.b [[R3]], 0($4) @@ -556,7 +556,7 @@ ; CHECK-DAG: ld.h [[R1:\$w[0-9]+]], 0($5) %2 = load <8 x i16>, <8 x i16>* %b ; CHECK-DAG: ld.h [[R2:\$w[0-9]+]], 0($6) - %3 = udiv <8 x i16> %1, %2 + %3 = udiv nof <8 x i16> %1, %2 ; 
CHECK-DAG: div_u.h [[R3:\$w[0-9]+]], [[R1]], [[R2]] store <8 x i16> %3, <8 x i16>* %c ; CHECK-DAG: st.h [[R3]], 0($4) @@ -572,7 +572,7 @@ ; CHECK-DAG: ld.w [[R1:\$w[0-9]+]], 0($5) %2 = load <4 x i32>, <4 x i32>* %b ; CHECK-DAG: ld.w [[R2:\$w[0-9]+]], 0($6) - %3 = udiv <4 x i32> %1, %2 + %3 = udiv nof <4 x i32> %1, %2 ; CHECK-DAG: div_u.w [[R3:\$w[0-9]+]], [[R1]], [[R2]] store <4 x i32> %3, <4 x i32>* %c ; CHECK-DAG: st.w [[R3]], 0($4) @@ -588,7 +588,7 @@ ; CHECK-DAG: ld.d [[R1:\$w[0-9]+]], 0($5) %2 = load <2 x i64>, <2 x i64>* %b ; CHECK-DAG: ld.d [[R2:\$w[0-9]+]], 0($6) - %3 = udiv <2 x i64> %1, %2 + %3 = udiv nof <2 x i64> %1, %2 ; CHECK-DAG: div_u.d [[R3:\$w[0-9]+]], [[R1]], [[R2]] store <2 x i64> %3, <2 x i64>* %c ; CHECK-DAG: st.d [[R3]], 0($4) Index: test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll =================================================================== --- test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll +++ test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll @@ -38,7 +38,7 @@ %E12 = extractelement <1 x i16> zeroinitializer, i32 0 %Shuff13 = shufflevector <8 x i64> zeroinitializer, <8 x i64> %Shuff, <8 x i32> <i32 9, i32 11, i32 13, i32 15, i32 undef, i32 3, i32 5, i32 7> %I14 = insertelement <4 x i32> zeroinitializer, i32 %3, i32 3 - %B15 = udiv <1 x i16> %B, zeroinitializer + %B15 = udiv nof <1 x i16> %B, zeroinitializer %Tr = trunc <8 x i64> %Shuff to <8 x i32> %Sl16 = select i1 %Cmp, i8 77, i8 %5 %Cmp17 = icmp ult <8 x i1> %Cmp10, %Cmp10 Index: test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll =================================================================== --- test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll +++ test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll @@ -27,7 +27,7 @@ %E6 = extractelement <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, i32 14 %Shuff7 = shufflevector <2 x i32> zeroinitializer, <2 x i32> zeroinitializer, <2 x i32> <i32 1, i32 
3> %I8 = insertelement <2 x i32> zeroinitializer, i32 135673, i32 1 - %B9 = udiv i8 %B, %B + %B9 = udiv nof i8 %B, %B %FC = uitofp i32 %3 to double %Sl10 = select i1 true, <1 x i1> zeroinitializer, <1 x i1> zeroinitializer %Cmp = icmp ne <1 x i64> %I, <i64 -1> Index: test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll =================================================================== --- test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll +++ test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll @@ -106,7 +106,7 @@ %E57 = extractelement <4 x i64> %Shuff21, i32 1 %Shuff58 = shufflevector <8 x i64> %Shuff, <8 x i64> zeroinitializer, <8 x i32> <i32 4, i32 6, i32 undef, i32 10, i32 12, i32 undef, i32 0, i32 2> %I59 = insertelement <4 x i64> zeroinitializer, i64 %E42, i32 2 - %B60 = udiv <8 x i8> %Sl, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> + %B60 = udiv nof <8 x i8> %Sl, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> %Tr61 = trunc i8 49 to i1 br i1 %Tr61, label %CF81, label %CF84 Index: test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll =================================================================== --- test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll +++ test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll @@ -38,7 +38,7 @@ %E12 = extractelement <2 x i16> zeroinitializer, i32 1 %Shuff13 = shufflevector <4 x i64> zeroinitializer, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 2, i32 4, i32 6> %I14 = insertelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 %B, i32 2 - %B15 = sdiv i64 334618, -1 + %B15 = sdiv nof i64 334618, -1 %PC = bitcast i1* %A4 to i64* %Sl16 = select i1 %Cmp10, <4 x i32> zeroinitializer, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1> %Cmp17 = icmp ule <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %Sl16 @@ -95,7 +95,7 @@ %E48 = extractelement <4 x i64> zeroinitializer, i32 3 %Shuff49 = shufflevector <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 undef> %I50 = insertelement <2 
x i1> zeroinitializer, i1 %Cmp10, i32 0 - %B51 = sdiv i64 %E19, 463132 + %B51 = sdiv nof i64 %E19, 463132 %Tr52 = trunc i64 %E48 to i32 %Sl53 = select i1 %Tr, i1 %Cmp46, i1 %Cmp10 br i1 %Sl53, label %CF78, label %CF83 Index: test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll =================================================================== --- test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll +++ test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll @@ -57,7 +57,7 @@ CF78: ; preds = %CF77 %Shuff20 = shufflevector <2 x i1> zeroinitializer, <2 x i1> zeroinitializer, <2 x i32> <i32 1, i32 3> %I21 = insertelement <8 x i1> zeroinitializer, i1 %Cmp10, i32 7 - %B22 = sdiv <4 x i64> %Shuff7, zeroinitializer + %B22 = sdiv nof <4 x i64> %Shuff7, zeroinitializer %FC = uitofp i8 97 to double %Sl23 = select i1 %Cmp10, <2 x i1> zeroinitializer, <2 x i1> zeroinitializer %L24 = load double, double* %Sl @@ -137,7 +137,7 @@ CF79: ; preds = %CF79, %CF73 %Shuff67 = shufflevector <8 x i1> %I21, <8 x i1> %I21, <8 x i32> <i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 undef, i32 4> %I68 = insertelement <1 x i1> %Cmp42, i1 %E25, i32 0 - %B69 = sdiv <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1> + %B69 = sdiv nof <16 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1> %Sl70 = select i1 %Cmp49, <2 x i1> %Sl23, <2 x i1> %Shuff45 %Cmp71 = icmp ne i1 false, false br i1 %Cmp71, label %CF79, label %CF83 Index: test/CodeGen/Mips/msa/llvm-stress-s525530439.ll =================================================================== --- 
test/CodeGen/Mips/msa/llvm-stress-s525530439.ll +++ test/CodeGen/Mips/msa/llvm-stress-s525530439.ll @@ -126,7 +126,7 @@ %E67 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 2 %Shuff68 = shufflevector <4 x i32> %Sl64, <4 x i32> %I8, <4 x i32> <i32 5, i32 undef, i32 1, i32 undef> %I69 = insertelement <4 x i16> %Shuff47, i16 %Sl, i32 3 - %B70 = sdiv <4 x i64> zeroinitializer, zeroinitializer + %B70 = sdiv nof <4 x i64> zeroinitializer, zeroinitializer %FC71 = sitofp i32 %L66 to double %Sl72 = select i1 %Cmp18, i64 %4, i64 %4 %Cmp73 = icmp eq <4 x i64> zeroinitializer, %B70 Index: test/CodeGen/Mips/msa/llvm-stress-s997348632.ll =================================================================== --- test/CodeGen/Mips/msa/llvm-stress-s997348632.ll +++ test/CodeGen/Mips/msa/llvm-stress-s997348632.ll @@ -113,7 +113,7 @@ %E63 = extractelement <4 x i64> %Shuff, i32 2 %Shuff64 = shufflevector <4 x i64> %Shuff56, <4 x i64> %Shuff56, <4 x i32> <i32 5, i32 7, i32 1, i32 undef> %I65 = insertelement <2 x i1> zeroinitializer, i1 false, i32 1 - %B66 = sdiv i32 %B, %E55 + %B66 = sdiv nof i32 %B, %E55 %Tr67 = trunc i8 %L54 to i1 br i1 %Tr67, label %CF81, label %CF83 Index: test/CodeGen/NVPTX/arithmetic-int.ll =================================================================== --- test/CodeGen/NVPTX/arithmetic-int.ll +++ test/CodeGen/NVPTX/arithmetic-int.ll @@ -56,14 +56,14 @@ define i64 @sdiv_i64(i64 %a, i64 %b) { ; CHECK: div.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}} ; CHECK: ret - %ret = sdiv i64 %a, %b + %ret = sdiv nof i64 %a, %b ret i64 %ret } define i64 @udiv_i64(i64 %a, i64 %b) { ; CHECK: div.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}} ; CHECK: ret - %ret = udiv i64 %a, %b + %ret = udiv nof i64 %a, %b ret i64 %ret } @@ -153,14 +153,14 @@ define i32 @sdiv_i32(i32 %a, i32 %b) { ; CHECK: div.s32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}} ; CHECK: ret - %ret = sdiv i32 %a, %b + %ret = sdiv nof i32 %a, %b ret i32 %ret } define i32 @udiv_i32(i32 %a, i32 %b) { 
; CHECK: div.u32 %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}} ; CHECK: ret - %ret = udiv i32 %a, %b + %ret = udiv nof i32 %a, %b ret i32 %ret } @@ -246,14 +246,14 @@ define i16 @sdiv_i16(i16 %a, i16 %b) { ; CHECK: div.s16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}} ; CHECK: ret - %ret = sdiv i16 %a, %b + %ret = sdiv nof i16 %a, %b ret i16 %ret } define i16 @udiv_i16(i16 %a, i16 %b) { ; CHECK: div.u16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}} ; CHECK: ret - %ret = udiv i16 %a, %b + %ret = udiv nof i16 %a, %b ret i16 %ret } Index: test/CodeGen/NVPTX/bypass-div.ll =================================================================== --- test/CodeGen/NVPTX/bypass-div.ll +++ test/CodeGen/NVPTX/bypass-div.ll @@ -8,7 +8,7 @@ ; CHECK: div.s64 ; CHECK: div.u32 ; CHECK: ret - %d = sdiv i64 %a, %b + %d = sdiv nof i64 %a, %b store i64 %d, i64* %retptr ret void } @@ -18,7 +18,7 @@ ; CHECK: div.u64 ; CHECK: div.u32 ; CHECK: ret - %d = udiv i64 %a, %b + %d = udiv nof i64 %a, %b store i64 %d, i64* %retptr ret void } @@ -47,7 +47,7 @@ ; CHECK-LABEL: sdiv32( ; CHECK: div.s32 ; CHECK-NOT: div. - %d = sdiv i32 %a, %b + %d = sdiv nof i32 %a, %b store i32 %d, i32* %retptr ret void } @@ -56,7 +56,7 @@ ; CHECK-LABEL: udiv32( ; CHECK: div.u32 ; CHECK-NOT: div. - %d = udiv i32 %a, %b + %d = udiv nof i32 %a, %b store i32 %d, i32* %retptr ret void } Index: test/CodeGen/NVPTX/divrem-combine.ll =================================================================== --- test/CodeGen/NVPTX/divrem-combine.ll +++ test/CodeGen/NVPTX/divrem-combine.ll @@ -16,7 +16,7 @@ ; CHECK-LABEL: sdiv32( define void @sdiv32(i32 %n, i32 %d, i32* %quot_ret, i32* %rem_ret) { ; CHECK: div.s32 [[quot:%r[0-9]+]], [[num:%r[0-9]+]], [[den:%r[0-9]+]]; - %quot = sdiv i32 %n, %d + %quot = sdiv nof i32 %n, %d ; O0: rem.s32 ; (This is unfortunately order-sensitive, even though mul is commutative.) 
@@ -34,7 +34,7 @@ ; CHECK-LABEL: udiv32( define void @udiv32(i32 %n, i32 %d, i32* %quot_ret, i32* %rem_ret) { ; CHECK: div.u32 [[quot:%r[0-9]+]], [[num:%r[0-9]+]], [[den:%r[0-9]+]]; - %quot = udiv i32 %n, %d + %quot = udiv nof i32 %n, %d ; O0: rem.u32 @@ -58,7 +58,7 @@ define void @mismatched_types1(i32 %n, i32 %d, i32* %quot_ret, i32* %rem_ret) { ; CHECK: div.u32 ; CHECK: rem.s32 - %quot = udiv i32 %n, %d + %quot = udiv nof i32 %n, %d %rem = srem i32 %n, %d store i32 %quot, i32* %quot_ret store i32 %rem, i32* %rem_ret @@ -69,7 +69,7 @@ define void @mismatched_types2(i32 %n, i32 %d, i32* %quot_ret, i32* %rem_ret) { ; CHECK: div.s32 ; CHECK: rem.u32 - %quot = sdiv i32 %n, %d + %quot = sdiv nof i32 %n, %d %rem = urem i32 %n, %d store i32 %quot, i32* %quot_ret store i32 %rem, i32* %rem_ret @@ -82,7 +82,7 @@ define void @mismatched_inputs1(i32 %n, i32 %d, i32* %quot_ret, i32* %rem_ret) { ; CHECK: div.s32 ; CHECK: rem.s32 - %quot = sdiv i32 %n, %d + %quot = sdiv nof i32 %n, %d %rem = srem i32 %d, %n store i32 %quot, i32* %quot_ret store i32 %rem, i32* %rem_ret @@ -93,7 +93,7 @@ define void @mismatched_inputs2(i32 %n1, i32 %n2, i32 %d, i32* %quot_ret, i32* %rem_ret) { ; CHECK: div.s32 ; CHECK: rem.s32 - %quot = sdiv i32 %n1, %d + %quot = sdiv nof i32 %n1, %d %rem = srem i32 %n2, %d store i32 %quot, i32* %quot_ret store i32 %rem, i32* %rem_ret @@ -104,7 +104,7 @@ define void @mismatched_inputs3(i32 %n, i32 %d1, i32 %d2, i32* %quot_ret, i32* %rem_ret) { ; CHECK: div.s32 ; CHECK: rem.s32 - %quot = sdiv i32 %n, %d1 + %quot = sdiv nof i32 %n, %d1 %rem = srem i32 %n, %d2 store i32 %quot, i32* %quot_ret store i32 %rem, i32* %rem_ret Index: test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll =================================================================== --- test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll +++ test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll @@ -13,7 +13,7 @@ %tmp14 = load <4 x i32>, <4 x i32>* %tmp1011, align 16 ; <<4 x i32>> [#uses=1] %tmp1516 = bitcast 
float* %argB to <4 x i32>* ; <<4 x i32>*> [#uses=1] %tmp18 = load <4 x i32>, <4 x i32>* %tmp1516, align 16 ; <<4 x i32>> [#uses=1] - %tmp19 = sdiv <4 x i32> %tmp14, %tmp18 ; <<4 x i32>> [#uses=1] + %tmp19 = sdiv nof <4 x i32> %tmp14, %tmp18 ; <<4 x i32>> [#uses=1] store <4 x i32> %tmp19, <4 x i32>* %tmp89, align 16 ret void Index: test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll =================================================================== --- test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll +++ test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll @@ -4,7 +4,7 @@ entry: %tmp50 = load i32, i32* null, align 4 ; <i32> [#uses=1] %tmp5051 = zext i32 %tmp50 to i64 ; <i64> [#uses=3] - %tmp53 = udiv i64 %byteStart, %tmp5051 ; <i64> [#uses=1] + %tmp53 = udiv nof i64 %byteStart, %tmp5051 ; <i64> [#uses=1] %tmp5354 = trunc i64 %tmp53 to i32 ; <i32> [#uses=1] %tmp62 = urem i64 %byteStart, %tmp5051 ; <i64> [#uses=1] %tmp94 = add i32 0, 1 ; <i32> [#uses=1] Index: test/CodeGen/PowerPC/PR33636.ll =================================================================== --- test/CodeGen/PowerPC/PR33636.ll +++ test/CodeGen/PowerPC/PR33636.ll @@ -335,7 +335,7 @@ br label %109 ; <label>:109: ; preds = %108, %107 - %110 = phi i32 [ sdiv (i32 32, i32 zext (i1 icmp eq (i32* getelementptr inbounds ([6 x i32], [6 x i32]* @g_756, i64 0, i64 0), i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 4)) to i32)), %108 ], [ 32, %107 ] + %110 = phi i32 [ sdiv nof (i32 32, i32 zext (i1 icmp eq (i32* getelementptr inbounds ([6 x i32], [6 x i32]* @g_756, i64 0, i64 0), i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 4)) to i32)), %108 ], [ 32, %107 ] %111 = trunc i32 %110 to i8 %112 = icmp ne i8 %111, 0 %113 = and i1 %112, icmp eq (i32* getelementptr inbounds ([6 x i32], [6 x i32]* @g_756, i64 0, i64 0), i32* getelementptr inbounds ([9 x i32], [9 x i32]* @g_3708, i64 0, i64 4)) Index: test/CodeGen/PowerPC/ctrloop-i64.ll 
=================================================================== --- test/CodeGen/PowerPC/ctrloop-i64.ll +++ test/CodeGen/PowerPC/ctrloop-i64.ll @@ -12,7 +12,7 @@ %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ] %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06 %0 = load i64, i64* %arrayidx, align 8 - %conv = udiv i64 %x.05, %d + %conv = udiv nof i64 %x.05, %d %conv1 = add i64 %conv, %0 %inc = add nsw i32 %i.06, 1 %exitcond = icmp eq i32 %inc, 2048 @@ -34,7 +34,7 @@ %x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ] %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06 %0 = load i64, i64* %arrayidx, align 8 - %conv = sdiv i64 %x.05, %d + %conv = sdiv nof i64 %x.05, %d %conv1 = add i64 %conv, %0 %inc = add nsw i32 %i.06, 1 %exitcond = icmp eq i32 %inc, 2048 Index: test/CodeGen/PowerPC/ctrloop-intrin.ll =================================================================== --- test/CodeGen/PowerPC/ctrloop-intrin.ll +++ test/CodeGen/PowerPC/ctrloop-intrin.ll @@ -166,7 +166,7 @@ %mul45 = fmul double %.lcssa, 1.000000e+06 %add = fadd double %mul45, 5.000000e-01 %conv46 = fptoui double %add to i64 - %div47 = udiv i64 %conv46, 1000000 + %div47 = udiv nof i64 %conv46, 1000000 %5 = mul i64 %div47, -1000000 %sub49 = add i64 %conv46, %5 %call50 = call i8* @halide_int64_to_string(i8* %dst.addr.0, i8* %end, i64 %div47, i32 1) #3 Index: test/CodeGen/PowerPC/ctrloop-udivti3.ll =================================================================== --- test/CodeGen/PowerPC/ctrloop-udivti3.ll +++ test/CodeGen/PowerPC/ctrloop-udivti3.ll @@ -15,7 +15,7 @@ %i.018 = add i64 %i.018.in, -1 %jj = sext i64 %i.018 to i128 %add.i = or i128 %jj, undef - %div.i = udiv i128 %add.i, %jj + %div.i = udiv nof i128 %add.i, %jj %conv3.i11 = trunc i128 %div.i to i64 store i64 %conv3.i11, i64* undef, align 8 %cmp = icmp eq i64 %i.018, 0 Index: test/CodeGen/PowerPC/div-2.ll =================================================================== --- test/CodeGen/PowerPC/div-2.ll +++ 
test/CodeGen/PowerPC/div-2.ll @@ -3,28 +3,28 @@ define i32 @test1(i32 %X) { %Y = and i32 %X, 15 ; <i32> [#uses=1] - %Z = sdiv i32 %Y, 4 ; <i32> [#uses=1] + %Z = sdiv nof i32 %Y, 4 ; <i32> [#uses=1] ret i32 %Z } define i32 @test2(i32 %W) { %X = and i32 %W, 15 ; <i32> [#uses=1] %Y = sub i32 16, %X ; <i32> [#uses=1] - %Z = sdiv i32 %Y, 4 ; <i32> [#uses=1] + %Z = sdiv nof i32 %Y, 4 ; <i32> [#uses=1] ret i32 %Z } define i32 @test3(i32 %W) { %X = and i32 %W, 15 ; <i32> [#uses=1] %Y = sub i32 15, %X ; <i32> [#uses=1] - %Z = sdiv i32 %Y, 4 ; <i32> [#uses=1] + %Z = sdiv nof i32 %Y, 4 ; <i32> [#uses=1] ret i32 %Z } define i32 @test4(i32 %W) { %X = and i32 %W, 2 ; <i32> [#uses=1] %Y = sub i32 5, %X ; <i32> [#uses=1] - %Z = sdiv i32 %Y, 2 ; <i32> [#uses=1] + %Z = sdiv nof i32 %Y, 2 ; <i32> [#uses=1] ret i32 %Z } Index: test/CodeGen/PowerPC/expand-foldable-isel.ll =================================================================== --- test/CodeGen/PowerPC/expand-foldable-isel.ll +++ test/CodeGen/PowerPC/expand-foldable-isel.ll @@ -64,7 +64,7 @@ cond.false21.i156: %add23.i154 = add nsw i32 %temp_id.sroa.21.1, 1 - %div24.i155 = sdiv i32 %add23.i154, 2 + %div24.i155 = sdiv nof i32 %add23.i154, 2 br label %while.cond11 } Index: test/CodeGen/PowerPC/fast-isel-call.ll =================================================================== --- test/CodeGen/PowerPC/fast-isel-call.ll +++ test/CodeGen/PowerPC/fast-isel-call.ll @@ -90,10 +90,10 @@ ; ret void ;} -; Intrinsic calls not yet implemented, and udiv isn't one for PPC anyway. +; Intrinsic calls not yet implemented, and udiv nof isn't one for PPC anyway. 
;define i32 @LibCall(i32 %a, i32 %b) { ;entry: -; %tmp1 = udiv i32 %a, %b ; <i32> [#uses=1] +; %tmp1 = udiv nof i32 %a, %b ; <i32> [#uses=1] ; ret i32 %tmp1 ;} Index: test/CodeGen/PowerPC/ppc64-P9-mod.ll =================================================================== --- test/CodeGen/PowerPC/ppc64-P9-mod.ll +++ test/CodeGen/PowerPC/ppc64-P9-mod.ll @@ -78,7 +78,7 @@ entry: %rem = srem i32 %a, %b store i32 %rem, i32* @mod_resultsw, align 4 - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b store i32 %div, i32* @div_resultsw, align 4 ret void ; CHECK-LABEL: modulo_div_sw @@ -101,7 +101,7 @@ entry: %rem = srem i32 %a, %c store i32 %rem, i32* @mod_resultsw, align 4 - %div = sdiv i32 %b, %c + %div = sdiv nof i32 %b, %c store i32 %div, i32* @div_resultsw, align 4 ret void ; CHECK-LABEL: modulo_div_abc_sw @@ -119,7 +119,7 @@ entry: %rem = urem i32 %a, %b store i32 %rem, i32* @mod_resultuw, align 4 - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b store i32 %div, i32* @div_resultuw, align 4 ret void ; CHECK-LABEL: modulo_div_uw @@ -142,7 +142,7 @@ entry: %rem = srem i32 %a, %b store i32 %rem, i32* @mod_resultsw, align 4 - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b store i32 %div, i32* @div_resultsw, align 4 ret void ; CHECK-LABEL: modulo_div_swuw @@ -160,7 +160,7 @@ entry: %rem = urem i64 %a, %b store i64 %rem, i64* @mod_resultud, align 8 - %div = sdiv i64 %a, %b + %div = sdiv nof i64 %a, %b store i64 %div, i64* @div_resultsd, align 8 ret void ; CHECK-LABEL: modulo_div_udsd @@ -237,7 +237,7 @@ ; see the div in the other block. 
define void @blocks_modulo_div_sw(i32 signext %a, i32 signext %b, i32 signext %c) local_unnamed_addr { entry: - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b store i32 %div, i32* @div_resultsw, align 4 %cmp = icmp sgt i32 %c, 0 br i1 %cmp, label %if.then, label %if.end Index: test/CodeGen/PowerPC/ppc64-r2-alloc.ll =================================================================== --- test/CodeGen/PowerPC/ppc64-r2-alloc.ll +++ test/CodeGen/PowerPC/ppc64-r2-alloc.ll @@ -4,71 +4,71 @@ define signext i32 @foo(i32 signext %a, i32 signext %d) #0 { entry: - %div = sdiv i32 %a, %d - %div1 = sdiv i32 %div, %d - %div2 = sdiv i32 %div1, %d - %div3 = sdiv i32 %div2, %d - %div4 = sdiv i32 %div3, %d - %div5 = sdiv i32 %div4, %d - %div6 = sdiv i32 %div5, %d - %div7 = sdiv i32 %div6, %d - %div8 = sdiv i32 %div7, %d - %div9 = sdiv i32 %div8, %d - %div10 = sdiv i32 %div9, %d - %div11 = sdiv i32 %div10, %d - %div12 = sdiv i32 %div11, %d - %div13 = sdiv i32 %div12, %d - %div14 = sdiv i32 %div13, %d - %div15 = sdiv i32 %div14, %d - %div16 = sdiv i32 %div15, %d - %div17 = sdiv i32 %div16, %d - %div18 = sdiv i32 %div17, %d - %div19 = sdiv i32 %div18, %d - %div20 = sdiv i32 %div19, %d - %div21 = sdiv i32 %div20, %d - %div22 = sdiv i32 %div21, %d - %div23 = sdiv i32 %div22, %d - %div24 = sdiv i32 %div23, %d - %div25 = sdiv i32 %div24, %d - %div26 = sdiv i32 %div25, %d - %div27 = sdiv i32 %div26, %d - %div28 = sdiv i32 %div27, %d - %div29 = sdiv i32 %div28, %d - %div30 = sdiv i32 %div29, %d - %div31 = sdiv i32 %div30, %d - %div32 = sdiv i32 %div31, %d - %div33 = sdiv i32 %div32, %div31 - %div34 = sdiv i32 %div33, %div30 - %div35 = sdiv i32 %div34, %div29 - %div36 = sdiv i32 %div35, %div28 - %div37 = sdiv i32 %div36, %div27 - %div38 = sdiv i32 %div37, %div26 - %div39 = sdiv i32 %div38, %div25 - %div40 = sdiv i32 %div39, %div24 - %div41 = sdiv i32 %div40, %div23 - %div42 = sdiv i32 %div41, %div22 - %div43 = sdiv i32 %div42, %div21 - %div44 = sdiv i32 %div43, %div20 - %div45 = sdiv i32 
%div44, %div19 - %div46 = sdiv i32 %div45, %div18 - %div47 = sdiv i32 %div46, %div17 - %div48 = sdiv i32 %div47, %div16 - %div49 = sdiv i32 %div48, %div15 - %div50 = sdiv i32 %div49, %div14 - %div51 = sdiv i32 %div50, %div13 - %div52 = sdiv i32 %div51, %div12 - %div53 = sdiv i32 %div52, %div11 - %div54 = sdiv i32 %div53, %div10 - %div55 = sdiv i32 %div54, %div9 - %div56 = sdiv i32 %div55, %div8 - %div57 = sdiv i32 %div56, %div7 - %div58 = sdiv i32 %div57, %div6 - %div59 = sdiv i32 %div58, %div5 - %div60 = sdiv i32 %div59, %div4 - %div61 = sdiv i32 %div60, %div3 - %div62 = sdiv i32 %div61, %div2 - %div63 = sdiv i32 %div62, %div1 - %div64 = sdiv i32 %div63, %div + %div = sdiv nof i32 %a, %d + %div1 = sdiv nof i32 %div, %d + %div2 = sdiv nof i32 %div1, %d + %div3 = sdiv nof i32 %div2, %d + %div4 = sdiv nof i32 %div3, %d + %div5 = sdiv nof i32 %div4, %d + %div6 = sdiv nof i32 %div5, %d + %div7 = sdiv nof i32 %div6, %d + %div8 = sdiv nof i32 %div7, %d + %div9 = sdiv nof i32 %div8, %d + %div10 = sdiv nof i32 %div9, %d + %div11 = sdiv nof i32 %div10, %d + %div12 = sdiv nof i32 %div11, %d + %div13 = sdiv nof i32 %div12, %d + %div14 = sdiv nof i32 %div13, %d + %div15 = sdiv nof i32 %div14, %d + %div16 = sdiv nof i32 %div15, %d + %div17 = sdiv nof i32 %div16, %d + %div18 = sdiv nof i32 %div17, %d + %div19 = sdiv nof i32 %div18, %d + %div20 = sdiv nof i32 %div19, %d + %div21 = sdiv nof i32 %div20, %d + %div22 = sdiv nof i32 %div21, %d + %div23 = sdiv nof i32 %div22, %d + %div24 = sdiv nof i32 %div23, %d + %div25 = sdiv nof i32 %div24, %d + %div26 = sdiv nof i32 %div25, %d + %div27 = sdiv nof i32 %div26, %d + %div28 = sdiv nof i32 %div27, %d + %div29 = sdiv nof i32 %div28, %d + %div30 = sdiv nof i32 %div29, %d + %div31 = sdiv nof i32 %div30, %d + %div32 = sdiv nof i32 %div31, %d + %div33 = sdiv nof i32 %div32, %div31 + %div34 = sdiv nof i32 %div33, %div30 + %div35 = sdiv nof i32 %div34, %div29 + %div36 = sdiv nof i32 %div35, %div28 + %div37 = sdiv nof i32 %div36, %div27 + 
%div38 = sdiv nof i32 %div37, %div26 + %div39 = sdiv nof i32 %div38, %div25 + %div40 = sdiv nof i32 %div39, %div24 + %div41 = sdiv nof i32 %div40, %div23 + %div42 = sdiv nof i32 %div41, %div22 + %div43 = sdiv nof i32 %div42, %div21 + %div44 = sdiv nof i32 %div43, %div20 + %div45 = sdiv nof i32 %div44, %div19 + %div46 = sdiv nof i32 %div45, %div18 + %div47 = sdiv nof i32 %div46, %div17 + %div48 = sdiv nof i32 %div47, %div16 + %div49 = sdiv nof i32 %div48, %div15 + %div50 = sdiv nof i32 %div49, %div14 + %div51 = sdiv nof i32 %div50, %div13 + %div52 = sdiv nof i32 %div51, %div12 + %div53 = sdiv nof i32 %div52, %div11 + %div54 = sdiv nof i32 %div53, %div10 + %div55 = sdiv nof i32 %div54, %div9 + %div56 = sdiv nof i32 %div55, %div8 + %div57 = sdiv nof i32 %div56, %div7 + %div58 = sdiv nof i32 %div57, %div6 + %div59 = sdiv nof i32 %div58, %div5 + %div60 = sdiv nof i32 %div59, %div4 + %div61 = sdiv nof i32 %div60, %div3 + %div62 = sdiv nof i32 %div61, %div2 + %div63 = sdiv nof i32 %div62, %div1 + %div64 = sdiv nof i32 %div63, %div ret i32 %div64 } Index: test/CodeGen/PowerPC/pr26690.ll =================================================================== --- test/CodeGen/PowerPC/pr26690.ll +++ test/CodeGen/PowerPC/pr26690.ll @@ -67,7 +67,7 @@ br i1 %tobool5, label %if.end, label %if.then if.then: ; preds = %while.end - %div = sdiv i32 %5, %mul + %div = sdiv nof i32 %5, %mul store i32 %div, i32* @g, align 4, !tbaa !1 br label %if.end Index: test/CodeGen/PowerPC/sdiv-pow2.ll =================================================================== --- test/CodeGen/PowerPC/sdiv-pow2.ll +++ test/CodeGen/PowerPC/sdiv-pow2.ll @@ -6,7 +6,7 @@ ; Function Attrs: nounwind readnone define signext i32 @foo4(i32 signext %a) #0 { entry: - %div = sdiv i32 %a, 8 + %div = sdiv nof i32 %a, 8 ret i32 %div ; CHECK-LABEL: @foo4 @@ -19,7 +19,7 @@ ; Function Attrs: nounwind readnone define i64 @foo8(i64 %a) #0 { entry: - %div = sdiv i64 %a, 8 + %div = sdiv nof i64 %a, 8 ret i64 %div ; CHECK-LABEL: 
@foo8 @@ -35,7 +35,7 @@ ; Function Attrs: nounwind readnone define signext i32 @foo4n(i32 signext %a) #0 { entry: - %div = sdiv i32 %a, -8 + %div = sdiv nof i32 %a, -8 ret i32 %div ; CHECK-LABEL: @foo4n @@ -49,7 +49,7 @@ ; Function Attrs: nounwind readnone define i64 @foo8n(i64 %a) #0 { entry: - %div = sdiv i64 %a, -8 + %div = sdiv nof i64 %a, -8 ret i64 %div ; CHECK-LABEL: @foo8n Index: test/CodeGen/PowerPC/select_const.ll =================================================================== --- test/CodeGen/PowerPC/select_const.ll +++ test/CodeGen/PowerPC/select_const.ll @@ -445,7 +445,7 @@ ; NO_ISEL-NEXT: addi 3, 0, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 - %bo = sdiv i8 %sel, 5 + %bo = sdiv nof i8 %sel, 5 ret i8 %bo } @@ -469,7 +469,7 @@ ; NO_ISEL-NEXT: addi 3, 4, 0 ; NO_ISEL-NEXT: blr %sel = select i1 %cond, i8 -4, i8 23 - %bo = udiv i8 %sel, 5 + %bo = udiv nof i8 %sel, 5 ret i8 %bo } Index: test/CodeGen/RISCV/div.ll =================================================================== --- test/CodeGen/RISCV/div.ll +++ test/CodeGen/RISCV/div.ll @@ -16,7 +16,7 @@ ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret - %1 = udiv i32 %a, %b + %1 = udiv nof i32 %a, %b ret i32 %1 } @@ -35,7 +35,7 @@ ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret - %1 = udiv i32 %a, 5 + %1 = udiv nof i32 %a, 5 ret i32 %1 } @@ -51,7 +51,7 @@ ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret - %1 = udiv i32 %a, 8 + %1 = udiv nof i32 %a, 8 ret i32 %1 } @@ -69,7 +69,7 @@ ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret - %1 = udiv i64 %a, %b + %1 = udiv nof i64 %a, %b ret i64 %1 } @@ -89,7 +89,7 @@ ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret - %1 = udiv i64 %a, 5 + %1 = udiv nof i64 %a, 5 ret i64 %1 } @@ -107,7 +107,7 @@ ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret - %1 = sdiv i32 %a, %b + %1 = sdiv nof i32 
%a, %b ret i32 %1 } @@ -126,7 +126,7 @@ ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret - %1 = sdiv i32 %a, 5 + %1 = sdiv nof i32 %a, 5 ret i32 %1 } @@ -145,7 +145,7 @@ ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret - %1 = sdiv i32 %a, 8 + %1 = sdiv nof i32 %a, 8 ret i32 %1 } @@ -163,7 +163,7 @@ ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret - %1 = sdiv i64 %a, %b + %1 = sdiv nof i64 %a, %b ret i64 %1 } @@ -183,6 +183,6 @@ ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 ; RV32I-NEXT: ret - %1 = sdiv i64 %a, 5 + %1 = sdiv nof i64 %a, 5 ret i64 %1 } Index: test/CodeGen/SPARC/64bit.ll =================================================================== --- test/CodeGen/SPARC/64bit.ll +++ test/CodeGen/SPARC/64bit.ll @@ -216,14 +216,14 @@ ; CHECK: signed_divide ; CHECK: sdivx %i0, %i1, %i0 define i64 @signed_divide(i64 %a, i64 %b) { - %r = sdiv i64 %a, %b + %r = sdiv nof i64 %a, %b ret i64 %r } ; CHECK: unsigned_divide ; CHECK: udivx %i0, %i1, %i0 define i64 @unsigned_divide(i64 %a, i64 %b) { - %r = udiv i64 %a, %b + %r = udiv nof i64 %a, %b ret i64 %r } Index: test/CodeGen/SPARC/LeonReplaceSDIVPassUT.ll =================================================================== --- test/CodeGen/SPARC/LeonReplaceSDIVPassUT.ll +++ test/CodeGen/SPARC/LeonReplaceSDIVPassUT.ll @@ -6,6 +6,6 @@ define i32 @lbr59(i32 %a, i32 %b) { - %r = sdiv i32 %a, %b + %r = sdiv nof i32 %a, %b ret i32 %r } Index: test/CodeGen/SPARC/basictest.ll =================================================================== --- test/CodeGen/SPARC/basictest.ll +++ test/CodeGen/SPARC/basictest.ll @@ -41,7 +41,7 @@ ; CHECK: wr %g0, %o2, %y ; CHECK: sdiv %o0, %o1, %o0 define i32 @signed_divide(i32 %a, i32 %b) { - %r = sdiv i32 %a, %b + %r = sdiv nof i32 %a, %b ret i32 %r } @@ -49,7 +49,7 @@ ; CHECK: wr %g0, %g0, %y ; CHECK: udiv %o0, %o1, %o0 define i32 @unsigned_divide(i32 %a, i32 %b) { - %r = udiv i32 %a, %b + %r = udiv 
nof i32 %a, %b ret i32 %r } Index: test/CodeGen/SPARC/multiple-div.ll =================================================================== --- test/CodeGen/SPARC/multiple-div.ll +++ test/CodeGen/SPARC/multiple-div.ll @@ -14,8 +14,8 @@ ; CHECK: add define i32 @two_divides(i32 %a, i32 %b) { - %r = udiv i32 %a, %b - %r2 = udiv i32 %b, %a + %r = udiv nof i32 %a, %b + %r2 = udiv nof i32 %b, %a %r3 = add i32 %r, %r2 ret i32 %r3 } Index: test/CodeGen/SPARC/soft-mul-div.ll =================================================================== --- test/CodeGen/SPARC/soft-mul-div.ll +++ test/CodeGen/SPARC/soft-mul-div.ll @@ -24,42 +24,42 @@ define i32 @test_sdiv32(i32 %a, i32 %b) #0 { ; CHECK-LABEL: test_sdiv32 ; CHECK: call .div - %d = sdiv i32 %a, %b + %d = sdiv nof i32 %a, %b ret i32 %d } define i16 @test_sdiv16(i16 %a, i16 %b) #0 { ; CHECK-LABEL: test_sdiv16 ; CHECK: call .div - %d = sdiv i16 %a, %b + %d = sdiv nof i16 %a, %b ret i16 %d } define i8 @test_sdiv8(i8 %a, i8 %b) #0 { ; CHECK-LABEL: test_sdiv8 ; CHECK: call .div - %d = sdiv i8 %a, %b + %d = sdiv nof i8 %a, %b ret i8 %d } define i32 @test_udiv32(i32 %a, i32 %b) #0 { ; CHECK-LABEL: test_udiv32 ; CHECK: call .udiv - %d = udiv i32 %a, %b + %d = udiv nof i32 %a, %b ret i32 %d } define i16 @test_udiv16(i16 %a, i16 %b) #0 { ; CHECK-LABEL: test_udiv16 ; CHECK: call .udiv - %d = udiv i16 %a, %b + %d = udiv nof i16 %a, %b ret i16 %d } define i8 @test_udiv8(i8 %a, i8 %b) #0 { ; CHECK-LABEL: test_udiv8 ; CHECK: call .udiv - %d = udiv i8 %a, %b + %d = udiv nof i8 %a, %b ret i8 %d } Index: test/CodeGen/SystemZ/copy-physreg-128.ll =================================================================== --- test/CodeGen/SystemZ/copy-physreg-128.ll +++ test/CodeGen/SystemZ/copy-physreg-128.ll @@ -35,7 +35,7 @@ CF280: ; preds = %CF280, %CF301 %I139 = insertelement <8 x i64> %Shuff49, i64 undef, i32 2 - %B155 = udiv <8 x i64> %I8, %I139 + %B155 = udiv nof <8 x i64> %I8, %I139 %Cmp157 = icmp ne i64 -1, undef br i1 %Cmp157, label %CF280, 
label %CF281 Index: test/CodeGen/SystemZ/expand-zext-pseudo.ll =================================================================== --- test/CodeGen/SystemZ/expand-zext-pseudo.ll +++ test/CodeGen/SystemZ/expand-zext-pseudo.ll @@ -41,7 +41,7 @@ CF827: ; preds = %CF923, %CF911, %CF875 %Sl142 = select i1 undef, i64 undef, i64 -1 - %B148 = sdiv i32 409071, 409071 + %B148 = sdiv nof i32 409071, 409071 %E153 = extractelement <2 x i32> %Shuff124, i32 1 br label %CF911 @@ -72,7 +72,7 @@ br i1 %E231, label %CF828, label %CF829 CF829: ; preds = %CF909, %CF829, %CF905 - %B234 = udiv i16 %L26, %L84 + %B234 = udiv nof i16 %L26, %L84 br i1 undef, label %CF829, label %CF894 CF894: ; preds = %CF894, %CF829 @@ -81,7 +81,7 @@ br i1 %Sl241, label %CF894, label %CF907 CF907: ; preds = %CF894 - %B247 = udiv i32 0, %E153 + %B247 = udiv nof i32 0, %E153 %PC248 = bitcast i64* %2 to i8* br label %CF909 Index: test/CodeGen/SystemZ/int-div-01.ll =================================================================== --- test/CodeGen/SystemZ/int-div-01.ll +++ test/CodeGen/SystemZ/int-div-01.ll @@ -11,7 +11,7 @@ ; CHECK: dsgfr %r0, %r4 ; CHECK: st %r1, 0(%r2) ; CHECK: br %r14 - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b store i32 %div, i32 *%dest ret void } @@ -38,7 +38,7 @@ ; CHECK-NOT: dsgfr ; CHECK: or %r2, %r3 ; CHECK: br %r14 - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b %rem = srem i32 %a, %b %or = or i32 %rem, %div ret i32 %or @@ -53,7 +53,7 @@ ; CHECK-NOT: dsgfr ; CHECK: or %r2, %r3 ; CHECK: br %r14 - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b %rem = srem i32 %a, %b %or = or i32 %rem, %div ret i32 %or @@ -70,7 +70,7 @@ ; CHECK: or %r2, %r3 ; CHECK: br %r14 %a = load i32 , i32 *%src - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b %rem = srem i32 %a, %b %or = or i32 %rem, %div ret i32 %or @@ -84,7 +84,7 @@ ; CHECK: st %r1, 0(%r2) ; CHECK: br %r14 %b = load i32 , i32 *%src - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b store i32 %div, i32 *%dest ret void 
} @@ -113,7 +113,7 @@ ; CHECK: or %r2, %r3 ; CHECK: br %r14 %b = load i32 , i32 *%src - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b %rem = srem i32 %a, %b %or = or i32 %rem, %div ret i32 %or @@ -202,7 +202,7 @@ ; CHECK: br %r14 %b = load i32 , i32 *%src %a = call i32 @foo() - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b store i32 %div, i32 *%dest ret void } @@ -236,16 +236,16 @@ %ret = call i32 @foo() - %div0 = sdiv i32 %ret, %val0 - %div1 = sdiv i32 %div0, %val1 - %div2 = sdiv i32 %div1, %val2 - %div3 = sdiv i32 %div2, %val3 - %div4 = sdiv i32 %div3, %val4 - %div5 = sdiv i32 %div4, %val5 - %div6 = sdiv i32 %div5, %val6 - %div7 = sdiv i32 %div6, %val7 - %div8 = sdiv i32 %div7, %val8 - %div9 = sdiv i32 %div8, %val9 + %div0 = sdiv nof i32 %ret, %val0 + %div1 = sdiv nof i32 %div0, %val1 + %div2 = sdiv nof i32 %div1, %val2 + %div3 = sdiv nof i32 %div2, %val3 + %div4 = sdiv nof i32 %div3, %val4 + %div5 = sdiv nof i32 %div4, %val5 + %div6 = sdiv nof i32 %div5, %val6 + %div7 = sdiv nof i32 %div6, %val7 + %div8 = sdiv nof i32 %div7, %val8 + %div9 = sdiv nof i32 %div8, %val9 ret i32 %div9 } Index: test/CodeGen/SystemZ/int-div-02.ll =================================================================== --- test/CodeGen/SystemZ/int-div-02.ll +++ test/CodeGen/SystemZ/int-div-02.ll @@ -13,7 +13,7 @@ ; CHECK: dlr %r2, %r4 ; CHECK: st %r3, 0(%r5) ; CHECK: br %r14 - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b store i32 %div, i32 *%dest ret void } @@ -42,7 +42,7 @@ ; CHECK-NOT: dlr ; CHECK: or %r2, %r3 ; CHECK: br %r14 - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b %rem = urem i32 %a, %b %or = or i32 %rem, %div ret i32 %or @@ -58,7 +58,7 @@ ; CHECK: st %r3, 0(%r5) ; CHECK: br %r14 %b = load i32 , i32 *%src - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b store i32 %div, i32 *%dest ret void } @@ -89,7 +89,7 @@ ; CHECK: or %r2, %r3 ; CHECK: br %r14 %b = load i32 , i32 *%src - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b %rem = urem i32 %a, %b 
%or = or i32 %rem, %div ret i32 %or @@ -196,16 +196,16 @@ %ret = call i32 @foo() - %div0 = udiv i32 %ret, %val0 - %div1 = udiv i32 %div0, %val1 - %div2 = udiv i32 %div1, %val2 - %div3 = udiv i32 %div2, %val3 - %div4 = udiv i32 %div3, %val4 - %div5 = udiv i32 %div4, %val5 - %div6 = udiv i32 %div5, %val6 - %div7 = udiv i32 %div6, %val7 - %div8 = udiv i32 %div7, %val8 - %div9 = udiv i32 %div8, %val9 + %div0 = udiv nof i32 %ret, %val0 + %div1 = udiv nof i32 %div0, %val1 + %div2 = udiv nof i32 %div1, %val2 + %div3 = udiv nof i32 %div2, %val3 + %div4 = udiv nof i32 %div3, %val4 + %div5 = udiv nof i32 %div4, %val5 + %div6 = udiv nof i32 %div5, %val6 + %div7 = udiv nof i32 %div6, %val7 + %div8 = udiv nof i32 %div7, %val8 + %div9 = udiv nof i32 %div8, %val9 ret i32 %div9 } Index: test/CodeGen/SystemZ/int-div-03.ll =================================================================== --- test/CodeGen/SystemZ/int-div-03.ll +++ test/CodeGen/SystemZ/int-div-03.ll @@ -13,7 +13,7 @@ ; CHECK: stg %r3, 0(%r5) ; CHECK: br %r14 %bext = sext i32 %b to i64 - %div = sdiv i64 %a, %bext + %div = sdiv nof i64 %a, %bext store i64 %div, i64 *%dest ret void } @@ -39,7 +39,7 @@ ; CHECK: ogr %r2, %r3 ; CHECK: br %r14 %bext = sext i32 %b to i64 - %div = sdiv i64 %a, %bext + %div = sdiv nof i64 %a, %bext %rem = srem i64 %a, %bext %or = or i64 %rem, %div ret i64 %or @@ -52,7 +52,7 @@ ; CHECK-NOT: dsgfr ; CHECK: br %r14 %bext = zext i32 %b to i64 - %div = sdiv i64 %a, %bext + %div = sdiv nof i64 %a, %bext store i64 %div, i64 *%dest ret void } @@ -77,7 +77,7 @@ ; CHECK: br %r14 %b = load i32 , i32 *%src %bext = sext i32 %b to i64 - %div = sdiv i64 %a, %bext + %div = sdiv nof i64 %a, %bext store i64 %div, i64 *%dest ret void } @@ -106,7 +106,7 @@ ; CHECK: br %r14 %b = load i32 , i32 *%src %bext = sext i32 %b to i64 - %div = sdiv i64 %a, %bext + %div = sdiv nof i64 %a, %bext %rem = srem i64 %a, %bext %or = or i64 %rem, %div ret i64 %or @@ -202,7 +202,7 @@ %b = load i32 , i32 *%src %a = call i64 @foo() 
%ext = sext i32 %b to i64 - %div = sdiv i64 %a, %ext + %div = sdiv nof i64 %a, %ext store i64 %div, i64 *%dest ret void } Index: test/CodeGen/SystemZ/int-div-04.ll =================================================================== --- test/CodeGen/SystemZ/int-div-04.ll +++ test/CodeGen/SystemZ/int-div-04.ll @@ -11,7 +11,7 @@ ; CHECK: dsgr %r2, %r4 ; CHECK: stg %r3, 0(%r5) ; CHECK: br %r14 - %div = sdiv i64 %a, %b + %div = sdiv nof i64 %a, %b store i64 %div, i64 *%dest ret void } @@ -36,7 +36,7 @@ ; CHECK-NOT: dsgr ; CHECK: ogr %r2, %r3 ; CHECK: br %r14 - %div = sdiv i64 %a, %b + %div = sdiv nof i64 %a, %b %rem = srem i64 %a, %b %or = or i64 %rem, %div ret i64 %or @@ -50,7 +50,7 @@ ; CHECK: stg %r3, 0(%r5) ; CHECK: br %r14 %b = load i64 , i64 *%src - %div = sdiv i64 %a, %b + %div = sdiv nof i64 %a, %b store i64 %div, i64 *%dest ret void } @@ -77,7 +77,7 @@ ; CHECK: ogr %r2, %r3 ; CHECK: br %r14 %b = load i64 , i64 *%src - %div = sdiv i64 %a, %b + %div = sdiv nof i64 %a, %b %rem = srem i64 %a, %b %or = or i64 %rem, %div ret i64 %or @@ -186,17 +186,17 @@ %ret = call i64 @foo() - %div0 = sdiv i64 %ret, %val0 - %div1 = sdiv i64 %div0, %val1 - %div2 = sdiv i64 %div1, %val2 - %div3 = sdiv i64 %div2, %val3 - %div4 = sdiv i64 %div3, %val4 - %div5 = sdiv i64 %div4, %val5 - %div6 = sdiv i64 %div5, %val6 - %div7 = sdiv i64 %div6, %val7 - %div8 = sdiv i64 %div7, %val8 - %div9 = sdiv i64 %div8, %val9 - %div10 = sdiv i64 %div9, %val10 + %div0 = sdiv nof i64 %ret, %val0 + %div1 = sdiv nof i64 %div0, %val1 + %div2 = sdiv nof i64 %div1, %val2 + %div3 = sdiv nof i64 %div2, %val3 + %div4 = sdiv nof i64 %div3, %val4 + %div5 = sdiv nof i64 %div4, %val5 + %div6 = sdiv nof i64 %div5, %val6 + %div7 = sdiv nof i64 %div6, %val7 + %div8 = sdiv nof i64 %div7, %val8 + %div9 = sdiv nof i64 %div8, %val9 + %div10 = sdiv nof i64 %div9, %val10 ret i64 %div10 } Index: test/CodeGen/SystemZ/int-div-05.ll =================================================================== --- 
test/CodeGen/SystemZ/int-div-05.ll +++ test/CodeGen/SystemZ/int-div-05.ll @@ -13,7 +13,7 @@ ; CHECK: dlgr %r2, %r4 ; CHECK: stg %r3, 0(%r5) ; CHECK: br %r14 - %div = udiv i64 %a, %b + %div = udiv nof i64 %a, %b store i64 %div, i64 *%dest ret void } @@ -42,7 +42,7 @@ ; CHECK-NOT: dlgr ; CHECK: ogr %r2, %r3 ; CHECK: br %r14 - %div = udiv i64 %a, %b + %div = udiv nof i64 %a, %b %rem = urem i64 %a, %b %or = or i64 %rem, %div ret i64 %or @@ -58,7 +58,7 @@ ; CHECK: stg %r3, 0(%r5) ; CHECK: br %r14 %b = load i64 , i64 *%src - %div = udiv i64 %a, %b + %div = udiv nof i64 %a, %b store i64 %div, i64 *%dest ret void } @@ -89,7 +89,7 @@ ; CHECK: ogr %r2, %r3 ; CHECK: br %r14 %b = load i64 , i64 *%src - %div = udiv i64 %a, %b + %div = udiv nof i64 %a, %b %rem = urem i64 %a, %b %or = or i64 %rem, %div ret i64 %or @@ -198,17 +198,17 @@ %ret = call i64 @foo() - %div0 = udiv i64 %ret, %val0 - %div1 = udiv i64 %div0, %val1 - %div2 = udiv i64 %div1, %val2 - %div3 = udiv i64 %div2, %val3 - %div4 = udiv i64 %div3, %val4 - %div5 = udiv i64 %div4, %val5 - %div6 = udiv i64 %div5, %val6 - %div7 = udiv i64 %div6, %val7 - %div8 = udiv i64 %div7, %val8 - %div9 = udiv i64 %div8, %val9 - %div10 = udiv i64 %div9, %val10 + %div0 = udiv nof i64 %ret, %val0 + %div1 = udiv nof i64 %div0, %val1 + %div2 = udiv nof i64 %div1, %val2 + %div3 = udiv nof i64 %div2, %val3 + %div4 = udiv nof i64 %div3, %val4 + %div5 = udiv nof i64 %div4, %val5 + %div6 = udiv nof i64 %div5, %val6 + %div7 = udiv nof i64 %div6, %val7 + %div8 = udiv nof i64 %div7, %val8 + %div9 = udiv nof i64 %div8, %val9 + %div10 = udiv nof i64 %div9, %val10 ret i64 %div10 } Index: test/CodeGen/SystemZ/int-div-06.ll =================================================================== --- test/CodeGen/SystemZ/int-div-06.ll +++ test/CodeGen/SystemZ/int-div-06.ll @@ -11,7 +11,7 @@ ; CHECK-DAG: srag %r2, [[REG]], 46 ; CHECK: ar %r2, [[RES1]] ; CHECK: br %r14 - %b = sdiv i32 %a, 139968 + %b = sdiv nof i32 %a, 139968 ret i32 %b } @@ -22,7 +22,7 @@ ; 
CHECK: msgfi [[REG]], 502748801 ; CHECK: srlg %r2, [[REG]], 46 ; CHECK: br %r14 - %b = udiv i32 %a, 139968 + %b = udiv nof i32 %a, 139968 ret i32 %b } @@ -39,7 +39,7 @@ ; CHECK: srag %r2, %r2, 15 ; CHECK: agr %r2, [[RES1]] ; CHECK: br %r14 - %b = sdiv i64 %a, 139968 + %b = sdiv nof i64 %a, 139968 ret i64 %b } @@ -51,6 +51,6 @@ ; CHECK: mlgr %r2, [[CONST]] ; CHECK: srlg %r2, %r2, 15 ; CHECK: br %r14 - %b = udiv i64 %a, 139968 + %b = udiv nof i64 %a, 139968 ret i64 %b } Index: test/CodeGen/SystemZ/int-mul-08.ll =================================================================== --- test/CodeGen/SystemZ/int-mul-08.ll +++ test/CodeGen/SystemZ/int-mul-08.ll @@ -78,7 +78,7 @@ ; CHECK: mlgr %r2, ; CHECK: srlg %r2, %r2, ; CHECK: br %r14 - %res = udiv i64 %a, 1234 + %res = udiv nof i64 %a, 1234 ret i64 %res } Index: test/CodeGen/SystemZ/list-ilp-crash.ll =================================================================== --- test/CodeGen/SystemZ/list-ilp-crash.ll +++ test/CodeGen/SystemZ/list-ilp-crash.ll @@ -14,7 +14,7 @@ br i1 %Cmp84, label %CF245, label %CF260 CF260: ; preds = %CF245 - %B156 = sdiv <4 x i8> %Shuff57, %Shuff57 + %B156 = sdiv nof <4 x i8> %Shuff57, %Shuff57 br label %CF255 CF255: ; preds = %CF255, %CF260 Index: test/CodeGen/SystemZ/loop-03.ll =================================================================== --- test/CodeGen/SystemZ/loop-03.ll +++ test/CodeGen/SystemZ/loop-03.ll @@ -24,7 +24,7 @@ ; CHECK-NOT: 16-byte Folded Reload %2 = load i64, i64* undef, align 8 - %3 = udiv i64 128, %2 + %3 = udiv nof i64 128, %2 %4 = mul i64 %3, %2 %5 = load i64, i64* undef, align 8 switch i32 undef, label %36 [ @@ -64,7 +64,7 @@ %17 = load i64, i64* %13, align 8 %18 = icmp ult i64 %15, %17 %19 = select i1 %18, i64 %15, i64 %17 - %20 = udiv i64 %19, %4 + %20 = udiv nof i64 %19, %4 %21 = icmp ugt i64 %20, 1 %22 = select i1 %21, i64 %20, i64 1 %23 = sub i64 %22, 0 Index: test/CodeGen/SystemZ/splitMove_undefReg_mverifier.ll 
=================================================================== --- test/CodeGen/SystemZ/splitMove_undefReg_mverifier.ll +++ test/CodeGen/SystemZ/splitMove_undefReg_mverifier.ll @@ -19,7 +19,7 @@ CF261: ; preds = %BB %Shuff = shufflevector <2 x i16> zeroinitializer, <2 x i16> zeroinitializer, <2 x i32> <i32 undef, i32 3> %I = insertelement <8 x i8> zeroinitializer, i8 69, i32 3 - %B = udiv i8 -99, 33 + %B = udiv nof i8 -99, 33 %Tr = trunc i64 -1 to i32 %Sl = select i1 true, i64* %2, i64* %2 %L5 = load i64, i64* %Sl @@ -66,7 +66,7 @@ %E25 = extractelement <4 x i16> zeroinitializer, i32 3 %Shuff26 = shufflevector <4 x i16> %Shuff7, <4 x i16> zeroinitializer, <4 x i32> <i32 2, i32 4, i32 6, i32 undef> %I27 = insertelement <4 x i16> zeroinitializer, i16 %Sl22, i32 0 - %B28 = udiv i16 %Sl11, -1 + %B28 = udiv nof i16 %Sl11, -1 %ZE = zext i1 true to i32 %Sl29 = select i1 true, i8 -99, i8 33 %Cmp30 = fcmp ord double 0xC275146F92573C4, 0x16FB351AF5F9C998 @@ -338,7 +338,7 @@ %E190 = extractelement <4 x i16> %B9, i32 3 %Shuff191 = shufflevector <4 x i16> %Shuff26, <4 x i16> %Shuff26, <4 x i32> <i32 6, i32 0, i32 2, i32 4> %I192 = insertelement <1 x i32> %I122, i32 %3, i32 0 - %B193 = udiv i8 %5, %L168 + %B193 = udiv nof i8 %5, %L168 %Se194 = sext <8 x i1> %Sl71 to <8 x i32> %Sl195 = select i1 %Cmp188, i8 %L182, i8 %L168 %Cmp196 = icmp slt i16 %B77, %Sl102 Index: test/CodeGen/SystemZ/splitMove_undefReg_mverifier_2.ll =================================================================== --- test/CodeGen/SystemZ/splitMove_undefReg_mverifier_2.ll +++ test/CodeGen/SystemZ/splitMove_undefReg_mverifier_2.ll @@ -48,7 +48,7 @@ CF364: ; preds = %CF335 store i64 %E32, i64* %Sl37 - %B57 = udiv <8 x i64> %I42, %B50 + %B57 = udiv nof <8 x i64> %I42, %B50 %L61 = load i64, i64* %Sl37 %Sl65 = select i1 undef, i1 %Cmp52, i1 true br i1 %Sl65, label %CF, label %CF333 Index: test/CodeGen/SystemZ/vec-div-01.ll =================================================================== --- 
test/CodeGen/SystemZ/vec-div-01.ll +++ test/CodeGen/SystemZ/vec-div-01.ll @@ -22,7 +22,7 @@ ; CHECK-DAG: vlvgb [[REG]], {{%r[0-9]+}}, 13 ; CHECK-DAG: vlvgb [[REG]], {{%r[0-9]+}}, 14 ; CHECK: br %r14 - %ret = sdiv <16 x i8> %val1, %val2 + %ret = sdiv nof <16 x i8> %val1, %val2 ret <16 x i8> %ret } @@ -37,7 +37,7 @@ ; CHECK-DAG: vlvgh [[REG]], {{%r[0-9]+}}, 5 ; CHECK-DAG: vlvgh [[REG]], {{%r[0-9]+}}, 6 ; CHECK: br %r14 - %ret = sdiv <8 x i16> %val1, %val2 + %ret = sdiv nof <8 x i16> %val1, %val2 ret <8 x i16> %ret } @@ -48,7 +48,7 @@ ; CHECK-DAG: vlvgf [[REG]], {{%r[0-9]+}}, 0 ; CHECK-DAG: vlvgf [[REG]], {{%r[0-9]+}}, 2 ; CHECK: br %r14 - %ret = sdiv <4 x i32> %val1, %val2 + %ret = sdiv nof <4 x i32> %val1, %val2 ret <4 x i32> %ret } @@ -57,7 +57,7 @@ ; CHECK-LABEL: f4: ; CHECK: vlvgp %v24, ; CHECK: br %r14 - %ret = sdiv <2 x i64> %val1, %val2 + %ret = sdiv nof <2 x i64> %val1, %val2 ret <2 x i64> %ret } Index: test/CodeGen/Thumb/2007-02-02-JoinIntervalsCrash.ll =================================================================== --- test/CodeGen/Thumb/2007-02-02-JoinIntervalsCrash.ll +++ test/CodeGen/Thumb/2007-02-02-JoinIntervalsCrash.ll @@ -15,7 +15,7 @@ %iftmp.13.0 = select i1 %tmp69, i8 48, i8 55 %tmp75 = add i8 %iftmp.13.0, 0 store i8 %tmp75, i8* null - %tmp81 = udiv i32 0, 0 + %tmp81 = udiv nof i32 0, 0 %tmp83 = icmp eq i32 %tmp81, 0 br i1 %tmp83, label %bb85, label %bb64 Index: test/CodeGen/Thumb/thumb-shrink-wrapping.ll =================================================================== --- test/CodeGen/Thumb/thumb-shrink-wrapping.ll +++ test/CodeGen/Thumb/thumb-shrink-wrapping.ll @@ -627,7 +627,7 @@ br i1 %cmp, label %if.then, label %if.else if.then: - %div = sdiv i32 5000, %value + %div = sdiv nof i32 5000, %value br label %if.end if.else: Index: test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll =================================================================== --- test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll +++ 
test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll @@ -19,7 +19,7 @@ bb11: ; preds = %bb5 %0 = load i32, i32* undef, align 4 ; <i32> [#uses=1] %1 = xor i32 %0, 123459876 ; <i32> [#uses=1] - %2 = sdiv i32 %1, 127773 ; <i32> [#uses=1] + %2 = sdiv nof i32 %1, 127773 ; <i32> [#uses=1] %3 = mul i32 %2, 2836 ; <i32> [#uses=1] %4 = sub i32 0, %3 ; <i32> [#uses=1] %5 = xor i32 %4, 123459876 ; <i32> [#uses=1] Index: test/CodeGen/Thumb2/div.ll =================================================================== --- test/CodeGen/Thumb2/div.ll +++ test/CodeGen/Thumb2/div.ll @@ -19,7 +19,7 @@ ; CHECK-THUMBV7M: sdiv ; CHECK-HWDIV: f1 ; CHECK-HWDIV: sdiv - %tmp1 = sdiv i32 %a, %b ; <i32> [#uses=1] + %tmp1 = sdiv nof i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } @@ -31,7 +31,7 @@ ; CHECK-THUMBV7M: udiv ; CHECK-HWDIV: f2 ; CHECK-HWDIV: udiv - %tmp1 = udiv i32 %a, %b ; <i32> [#uses=1] + %tmp1 = udiv nof i32 %a, %b ; <i32> [#uses=1] ret i32 %tmp1 } Index: test/CodeGen/Thumb2/thumb2-select.ll =================================================================== --- test/CodeGen/Thumb2/thumb2-select.ll +++ test/CodeGen/Thumb2/thumb2-select.ll @@ -69,7 +69,7 @@ ; CHECK: it hi ; CHECK: lsrhi {{r[0-9]+}} %tmp1 = icmp ugt i32 %a, %b - %tmp2 = udiv i32 %c, 3 + %tmp2 = udiv nof i32 %c, 3 %tmp3 = select i1 %tmp1, i32 %tmp2, i32 3 ret i32 %tmp3 } Index: test/CodeGen/WebAssembly/divrem-constant.ll =================================================================== --- test/CodeGen/WebAssembly/divrem-constant.ll +++ test/CodeGen/WebAssembly/divrem-constant.ll @@ -8,28 +8,28 @@ ; CHECK-LABEL: test_udiv_2: ; CHECK: i32.shr_u define i32 @test_udiv_2(i32 %x) { - %t = udiv i32 %x, 2 + %t = udiv nof i32 %x, 2 ret i32 %t } ; CHECK-LABEL: test_udiv_5: ; CHECK: i32.div_u define i32 @test_udiv_5(i32 %x) { - %t = udiv i32 %x, 5 + %t = udiv nof i32 %x, 5 ret i32 %t } ; CHECK-LABEL: test_sdiv_2: ; CHECK: i32.div_s define i32 @test_sdiv_2(i32 %x) { - %t = sdiv i32 %x, 2 + %t = sdiv nof i32 %x, 2 ret i32 %t } ; 
CHECK-LABEL: test_sdiv_5: ; CHECK: i32.div_s define i32 @test_sdiv_5(i32 %x) { - %t = sdiv i32 %x, 5 + %t = sdiv nof i32 %x, 5 ret i32 %t } Index: test/CodeGen/WebAssembly/i128.ll =================================================================== --- test/CodeGen/WebAssembly/i128.ll +++ test/CodeGen/WebAssembly/i128.ll @@ -51,7 +51,7 @@ ; CHECK: call __divti3@FUNCTION, ${{.+}}, ${{.+}}, ${{.+}}, ${{.+}}, ${{.+}}{{$}} ; CHECK: return{{$}} define i128 @sdiv128(i128 %x, i128 %y) { - %a = sdiv i128 %x, %y + %a = sdiv nof i128 %x, %y ret i128 %a } @@ -61,7 +61,7 @@ ; CHECK: call __udivti3@FUNCTION, ${{.+}}, ${{.+}}, ${{.+}}, ${{.+}}, ${{.+}}{{$}} ; CHECK: return{{$}} define i128 @udiv128(i128 %x, i128 %y) { - %a = udiv i128 %x, %y + %a = udiv nof i128 %x, %y ret i128 %a } Index: test/CodeGen/WebAssembly/i32.ll =================================================================== --- test/CodeGen/WebAssembly/i32.ll +++ test/CodeGen/WebAssembly/i32.ll @@ -53,7 +53,7 @@ ; CHECK-NEXT: i32.div_s $push0=, $pop[[L0]], $pop[[L1]]{{$}} ; CHECK-NEXT: return $pop0{{$}} define i32 @sdiv32(i32 %x, i32 %y) { - %a = sdiv i32 %x, %y + %a = sdiv nof i32 %x, %y ret i32 %a } @@ -65,7 +65,7 @@ ; CHECK-NEXT: i32.div_u $push0=, $pop[[L0]], $pop[[L1]]{{$}} ; CHECK-NEXT: return $pop0{{$}} define i32 @udiv32(i32 %x, i32 %y) { - %a = udiv i32 %x, %y + %a = udiv nof i32 %x, %y ret i32 %a } Index: test/CodeGen/WebAssembly/i64.ll =================================================================== --- test/CodeGen/WebAssembly/i64.ll +++ test/CodeGen/WebAssembly/i64.ll @@ -53,7 +53,7 @@ ; CHECK-NEXT: i64.div_s $push0=, $pop[[L0]], $pop[[L1]]{{$}} ; CHECK-NEXT: return $pop0{{$}} define i64 @sdiv64(i64 %x, i64 %y) { - %a = sdiv i64 %x, %y + %a = sdiv nof i64 %x, %y ret i64 %a } @@ -65,7 +65,7 @@ ; CHECK-NEXT: i64.div_u $push0=, $pop[[L0]], $pop[[L1]]{{$}} ; CHECK-NEXT: return $pop0{{$}} define i64 @udiv64(i64 %x, i64 %y) { - %a = udiv i64 %x, %y + %a = udiv nof i64 %x, %y ret i64 %a } Index: 
test/CodeGen/WebAssembly/phi.ll =================================================================== --- test/CodeGen/WebAssembly/phi.ll +++ test/CodeGen/WebAssembly/phi.ll @@ -16,7 +16,7 @@ %t = icmp slt i32 %p, 0 br i1 %t, label %true, label %done true: - %a = sdiv i32 %p, 3 + %a = sdiv nof i32 %p, 3 br label %done done: %s = phi i32 [ %a, %true ], [ %p, %entry ] Index: test/CodeGen/WebAssembly/reg-stackify.ll =================================================================== --- test/CodeGen/WebAssembly/reg-stackify.ll +++ test/CodeGen/WebAssembly/reg-stackify.ll @@ -49,7 +49,7 @@ ; CHECK-LABEL: sink_trap: ; CHECK: return $pop{{[0-9]+}}{{$}} define i32 @sink_trap(i32 %x, i32 %y, i32* %p) { - %t = sdiv i32 %x, %y + %t = sdiv nof i32 %x, %y store volatile i32 0, i32* %p ret i32 %t } @@ -202,21 +202,21 @@ ; CHECK-NEXT: return $pop[[L14]]{{$}} define i32 @div_tree(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i32 %j, i32 %k, i32 %l, i32 %m, i32 %n, i32 %o, i32 %p) { entry: - %div = sdiv i32 %a, %b - %div1 = sdiv i32 %c, %d - %div2 = sdiv i32 %div, %div1 - %div3 = sdiv i32 %e, %f - %div4 = sdiv i32 %g, %h - %div5 = sdiv i32 %div3, %div4 - %div6 = sdiv i32 %div2, %div5 - %div7 = sdiv i32 %i, %j - %div8 = sdiv i32 %k, %l - %div9 = sdiv i32 %div7, %div8 - %div10 = sdiv i32 %m, %n - %div11 = sdiv i32 %o, %p - %div12 = sdiv i32 %div10, %div11 - %div13 = sdiv i32 %div9, %div12 - %div14 = sdiv i32 %div6, %div13 + %div = sdiv nof i32 %a, %b + %div1 = sdiv nof i32 %c, %d + %div2 = sdiv nof i32 %div, %div1 + %div3 = sdiv nof i32 %e, %f + %div4 = sdiv nof i32 %g, %h + %div5 = sdiv nof i32 %div3, %div4 + %div6 = sdiv nof i32 %div2, %div5 + %div7 = sdiv nof i32 %i, %j + %div8 = sdiv nof i32 %k, %l + %div9 = sdiv nof i32 %div7, %div8 + %div10 = sdiv nof i32 %m, %n + %div11 = sdiv nof i32 %o, %p + %div12 = sdiv nof i32 %div10, %div11 + %div13 = sdiv nof i32 %div9, %div12 + %div14 = sdiv nof i32 %div6, %div13 ret i32 %div14 } @@ -293,7 +293,7 @@ %tmp2 = add 
i32 %arg, 1 %tmp3 = call i32 @callee(i32 %tmp2) %tmp5 = sub i32 %tmp3, %tmp1 - %tmp6 = sdiv i32 %tmp5, %tmp1 + %tmp6 = sdiv nof i32 %tmp5, %tmp1 ret i32 %tmp6 } Index: test/CodeGen/X86/2006-11-17-IllegalMove.ll =================================================================== --- test/CodeGen/X86/2006-11-17-IllegalMove.ll +++ test/CodeGen/X86/2006-11-17-IllegalMove.ll @@ -25,7 +25,7 @@ ] bb77: ; preds = %entry, %entry - %tmp99 = udiv i64 0, 0 ; <i64> [#uses=1] + %tmp99 = udiv nof i64 0, 0 ; <i64> [#uses=1] %tmp = load i8, i8* null ; <i8> [#uses=1] %tmp114 = icmp eq i64 0, 0 ; <i1> [#uses=1] br label %cond_true115 @@ -38,7 +38,7 @@ br label %cond_true120 cond_true120: ; preds = %cond_true115 - %tmp127 = udiv i8 %tmp, %tmp118 ; <i8> [#uses=1] + %tmp127 = udiv nof i8 %tmp, %tmp118 ; <i8> [#uses=1] %tmp127.upgrd.1 = zext i8 %tmp127 to i64 ; <i64> [#uses=1] br label %cond_next129 Index: test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll =================================================================== --- test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll +++ test/CodeGen/X86/2007-04-08-InlineAsmCrash.ll @@ -10,7 +10,7 @@ define fastcc i32 @bc_divide(%struct.bc_struct* %n1, %struct.bc_struct* %n2, %struct.bc_struct** %quot, i32 %scale) nounwind { entry: %tmp7.i46 = tail call i64 asm sideeffect ".byte 0x0f,0x31", "={dx},=*{ax},~{dirflag},~{fpsr},~{flags}"( i64* getelementptr (%struct.CycleCount, %struct.CycleCount* @_programStartTime, i32 0, i32 1) ) ; <i64> [#uses=0] - %tmp221 = sdiv i32 10, 0 ; <i32> [#uses=1] + %tmp221 = sdiv nof i32 10, 0 ; <i32> [#uses=1] tail call fastcc void @_one_mult( i8* null, i32 0, i32 %tmp221, i8* null ) ret i32 0 } Index: test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll =================================================================== --- test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll +++ test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll @@ -23,7 +23,7 @@ %tmp44 = load %struct.partition_elem*, %struct.partition_elem** %tmp43 ; 
<%struct.partition_elem*> [#uses=1] %tmp4445 = ptrtoint %struct.partition_elem* %tmp44 to i32 ; <i32> [#uses=1] %tmp48 = sub i32 %tmp4445, 0 ; <i32> [#uses=1] - %tmp49 = sdiv i32 %tmp48, 12 ; <i32> [#uses=1] + %tmp49 = sdiv nof i32 %tmp48, 12 ; <i32> [#uses=1] %indvar.next127 = add i32 %i.2115.0, 1 ; <i32> [#uses=2] %exitcond128 = icmp eq i32 %indvar.next127, 0 ; <i1> [#uses=1] br i1 %exitcond128, label %bb58, label %bb32 Index: test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll =================================================================== --- test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll +++ test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll @@ -95,7 +95,7 @@ cond_false.i: ; preds = %bb35 %tmp42 = load i8, i8* %arg1, align 1 ; <i8> [#uses=3] - %tmp7.i = udiv i8 %tmp42, %tmp40 ; <i8> [#uses=2] + %tmp7.i = udiv nof i8 %tmp42, %tmp40 ; <i8> [#uses=2] %tmp1.i197 = icmp eq i8 %tmp42, 0 ; <i1> [#uses=1] %tmp7.i198 = or i1 %tmp1.i197, %tmp1.i ; <i1> [#uses=1] br i1 %tmp7.i198, label %cond_true.i200, label %cond_next17.i Index: test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll =================================================================== --- test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll +++ test/CodeGen/X86/2007-11-30-LoadFolding-Bug.ll @@ -21,7 +21,7 @@ ret void cond_next.i: ; preds = %mp_unexp_mp2d.exit.i - %tmp22.i = sdiv i32 0, 2 ; <i32> [#uses=2] + %tmp22.i = sdiv nof i32 0, 2 ; <i32> [#uses=2] br i1 %foo, label %cond_true29.i, label %cond_next36.i cond_true29.i: ; preds = %cond_next.i Index: test/CodeGen/X86/2008-01-08-SchedulerCrash.ll =================================================================== --- test/CodeGen/X86/2008-01-08-SchedulerCrash.ll +++ test/CodeGen/X86/2008-01-08-SchedulerCrash.ll @@ -19,7 +19,7 @@ ret i32 0 bb951: ; preds = %bb986, %entry - %tmp955 = sdiv i32 %offset, 2 ; <i32> [#uses=3] + %tmp955 = sdiv nof i32 %offset, 2 ; <i32> [#uses=3] %tmp961 = getelementptr %struct.indexentry, %struct.indexentry* null, i32 %tmp955, i32 0 ; <i32*> 
[#uses=1] br i1 %cond, label %bb986, label %bb967 Index: test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll =================================================================== --- test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll +++ test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll @@ -21,11 +21,11 @@ br i1 %exitcond, label %bb35, label %bb24 bb35: ; preds = %bb24, %entry - %tmp42 = sdiv i32 %i, 9 ; <i32> [#uses=1] + %tmp42 = sdiv nof i32 %i, 9 ; <i32> [#uses=1] %tmp43 = add i32 %tmp42, -1 ; <i32> [#uses=1] %tmp4344 = sitofp i32 %tmp43 to double ; <double> [#uses=1] %tmp17.i76 = fmul double %tmp4344, 0.000000e+00 ; <double> [#uses=1] - %tmp48 = sdiv i32 %i, 3 ; <i32> [#uses=1] + %tmp48 = sdiv nof i32 %i, 3 ; <i32> [#uses=1] %tmp49 = srem i32 %tmp48, 3 ; <i32> [#uses=1] %tmp50 = add i32 %tmp49, -1 ; <i32> [#uses=1] %tmp5051 = sitofp i32 %tmp50 to double ; <double> [#uses=1] Index: test/CodeGen/X86/2008-04-16-CoalescerBug.ll =================================================================== --- test/CodeGen/X86/2008-04-16-CoalescerBug.ll +++ test/CodeGen/X86/2008-04-16-CoalescerBug.ll @@ -26,7 +26,7 @@ %tmp8081.us = zext i8 %tmp80.us to i32 ; <i32> [#uses=1] %tmp87.us = mul i32 %tmp8081.us, 0 ; <i32> [#uses=1] %tmp92.us = add i32 0, %tmp87.us ; <i32> [#uses=1] - %tmp93.us = udiv i32 %tmp92.us, 255 ; <i32> [#uses=1] + %tmp93.us = udiv nof i32 %tmp92.us, 255 ; <i32> [#uses=1] br label %bb94.us bb53: ; preds = %entry ret void Index: test/CodeGen/X86/2008-04-28-CoalescerBug.ll =================================================================== --- test/CodeGen/X86/2008-04-28-CoalescerBug.ll +++ test/CodeGen/X86/2008-04-28-CoalescerBug.ll @@ -96,7 +96,7 @@ br i1 false, label %bb13339, label %bb13330 bb13330: ; preds = %bb13324 - %tmp13337 = sdiv i64 0, 0 ; <i64> [#uses=1] + %tmp13337 = sdiv nof i64 0, 0 ; <i64> [#uses=1] br label %bb13339 bb13339: ; preds = %bb13330, %bb13324, %bb13252 @@ -109,7 +109,7 @@ bb13351: ; preds = %bb13345 %tmp13354 = mul i64 0, %tmp13318 ; <i64> 
[#uses=1] %tmp13357 = sub i64 %tmp1329013291, %tmp13309 ; <i64> [#uses=1] - %tmp13358 = sdiv i64 %tmp13354, %tmp13357 ; <i64> [#uses=1] + %tmp13358 = sdiv nof i64 %tmp13354, %tmp13357 ; <i64> [#uses=1] br label %bb13360 bb13360: ; preds = %bb13351, %bb13345, %bb13339 Index: test/CodeGen/X86/2009-02-08-CoalescerBug.ll =================================================================== --- test/CodeGen/X86/2009-02-08-CoalescerBug.ll +++ test/CodeGen/X86/2009-02-08-CoalescerBug.ll @@ -10,7 +10,7 @@ bb.i: ; preds = %entry %1 = zext i1 %or.cond to i32 ; <i32> [#uses=1] - %2 = sdiv i32 %1, 0 ; <i32> [#uses=1] + %2 = sdiv nof i32 %1, 0 ; <i32> [#uses=1] %3 = trunc i32 %2 to i16 ; <i16> [#uses=1] br label %bar.exit Index: test/CodeGen/X86/2009-10-25-RewriterBug.ll =================================================================== --- test/CodeGen/X86/2009-10-25-RewriterBug.ll +++ test/CodeGen/X86/2009-10-25-RewriterBug.ll @@ -73,7 +73,7 @@ br i1 undef, label %bb.nph51.i, label %bb66.i bb.nph51.i: ; preds = %bb38.i - %25 = sdiv i32 %storemerge52.i, 8 ; <i32> [#uses=0] + %25 = sdiv nof i32 %storemerge52.i, 8 ; <i32> [#uses=0] br label %bb39.i bb39.i: ; preds = %bb64.i, %bb.nph51.i Index: test/CodeGen/X86/2010-03-04-Mul8Bug.ll =================================================================== --- test/CodeGen/X86/2010-03-04-Mul8Bug.ll +++ test/CodeGen/X86/2010-03-04-Mul8Bug.ll @@ -10,7 +10,7 @@ define void @func_56(i64 %p_57, i32*** %p_58) nounwind ssp { for.end: %conv49 = trunc i32 undef to i8 ; <i8> [#uses=1] - %div.i = udiv i8 %conv49, 5 ; <i8> [#uses=1] + %div.i = udiv nof i8 %conv49, 5 ; <i8> [#uses=1] %conv51 = zext i8 %div.i to i32 ; <i32> [#uses=1] %call55 = call i32 @qux(i32 undef, i32 -2) nounwind ; <i32> [#uses=1] %rem.i = urem i32 %call55, -1 ; <i32> [#uses=1] Index: test/CodeGen/X86/2010-05-03-CoalescerSubRegClobber.ll =================================================================== --- test/CodeGen/X86/2010-05-03-CoalescerSubRegClobber.ll +++ 
test/CodeGen/X86/2010-05-03-CoalescerSubRegClobber.ll @@ -17,8 +17,8 @@ while.end: ; preds = %while.cond, %entry %x.0.lcssa = phi i32 [ %from, %entry ], [ %y.021, %while.cond ] ; <i32> [#uses=2] - %div = udiv i32 %from, %x.0.lcssa ; <i32> [#uses=1] - %div11 = udiv i32 %to, %x.0.lcssa ; <i32> [#uses=1] + %div = udiv nof i32 %from, %x.0.lcssa ; <i32> [#uses=1] + %div11 = udiv nof i32 %to, %x.0.lcssa ; <i32> [#uses=1] %conv = zext i32 %v to i64 ; <i64> [#uses=1] %conv14 = zext i32 %div11 to i64 ; <i64> [#uses=1] ; Verify that we don't clobber %eax after putting the imulq result in %rax @@ -27,7 +27,7 @@ ; CHECK: div %mul = mul i64 %conv14, %conv ; <i64> [#uses=1] %conv16 = zext i32 %div to i64 ; <i64> [#uses=1] - %div17 = udiv i64 %mul, %conv16 ; <i64> [#uses=1] + %div17 = udiv nof i64 %mul, %conv16 ; <i64> [#uses=1] %conv18 = trunc i64 %div17 to i32 ; <i32> [#uses=1] ret i32 %conv18 } Index: test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll =================================================================== --- test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll +++ test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll @@ -20,7 +20,7 @@ %btmp4 = trunc i64 %btmp3 to i32 ; <i32> [#uses=1] ; CHECK: idiv - %x6 = sdiv i32 %x5, %btmp4 ; <i32> [#uses=1] + %x6 = sdiv nof i32 %x5, %btmp4 ; <i32> [#uses=1] store i32 %x6, i32* %w, align 4 ret void } Index: test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll =================================================================== --- test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll +++ test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll @@ -13,7 +13,7 @@ define i32 @cvtchar(i8* nocapture %sp) nounwind { %temp.i = alloca [2 x i8], align 1 %tmp1 = load i8, i8* %sp, align 1 - %div = udiv i8 %tmp1, 10 + %div = udiv nof i8 %tmp1, 10 %rem = urem i8 %div, 10 %arrayidx.i = getelementptr inbounds [2 x i8], [2 x i8]* %temp.i, i32 0, i32 0 store i8 %rem, i8* %arrayidx.i, align 1 Index: test/CodeGen/X86/2011-06-03-x87chain.ll 
=================================================================== --- test/CodeGen/X86/2011-06-03-x87chain.ll +++ test/CodeGen/X86/2011-06-03-x87chain.ll @@ -9,7 +9,7 @@ ; CHECK: fstps store float %conv, float* %f, align 4 ; CHECK: idivl - %div = sdiv i32 %x, %y + %div = sdiv nof i32 %x, %y %conv5 = sext i32 %div to i64 store i64 %conv5, i64* %b, align 8 ret float %conv Index: test/CodeGen/X86/2011-10-19-LegelizeLoad.ll =================================================================== --- test/CodeGen/X86/2011-10-19-LegelizeLoad.ll +++ test/CodeGen/X86/2011-10-19-LegelizeLoad.ll @@ -20,7 +20,7 @@ ; CHECK: pmovsxbq j(%rip), % %0 = load <2 x i8>, <2 x i8>* @i, align 8 %1 = load <2 x i8>, <2 x i8>* @j, align 8 - %div = sdiv <2 x i8> %1, %0 + %div = sdiv nof <2 x i8> %1, %0 store <2 x i8> %div, <2 x i8>* getelementptr inbounds (%union.anon, %union.anon* @res, i32 0, i32 0), align 8 ret i32 0 ; CHECK: ret Index: test/CodeGen/X86/DynamicCalleeSavedRegisters.ll =================================================================== --- test/CodeGen/X86/DynamicCalleeSavedRegisters.ll +++ test/CodeGen/X86/DynamicCalleeSavedRegisters.ll @@ -47,7 +47,7 @@ ; The result will return in EAX, ECX and EDX. 
define x86_regcallcc {i32, i32, i32} @test_callee(i32 %a0, i32 %b0, i32 %c0, i32 %d0, i32 %e0) nounwind { %b1 = mul i32 7, %e0 - %b2 = udiv i32 5, %e0 + %b2 = udiv nof i32 5, %e0 %b3 = mul i32 7, %d0 %b4 = insertvalue {i32, i32, i32} undef, i32 %b1, 0 %b5 = insertvalue {i32, i32, i32} %b4, i32 %b2, 1 Index: test/CodeGen/X86/alldiv-divdi3.ll =================================================================== --- test/CodeGen/X86/alldiv-divdi3.ll +++ test/CodeGen/X86/alldiv-divdi3.ll @@ -8,7 +8,7 @@ define i32 @main(i32 %argc, i8** nocapture %argv) nounwind readonly { entry: %conv4 = sext i32 %argc to i64 - %div = sdiv i64 84, %conv4 + %div = sdiv nof i64 84, %conv4 %conv7 = trunc i64 %div to i32 ret i32 %conv7 } Index: test/CodeGen/X86/anyext.ll =================================================================== --- test/CodeGen/X86/anyext.ll +++ test/CodeGen/X86/anyext.ll @@ -23,7 +23,7 @@ ; X64-NEXT: andl $1, %eax ; X64-NEXT: retq %q = trunc i32 %p to i8 - %r = udiv i8 %q, %x + %r = udiv nof i8 %q, %x %s = zext i8 %r to i32 %t = and i32 %s, 1 ret i32 %t @@ -48,7 +48,7 @@ ; X64-NEXT: andl $1, %eax ; X64-NEXT: retq %q = trunc i32 %p to i16 - %r = udiv i16 %q, %x + %r = udiv nof i16 %q, %x %s = zext i16 %r to i32 %t = and i32 %s, 1 ret i32 %t Index: test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll =================================================================== --- test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll +++ test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll @@ -60,7 +60,7 @@ %mul = mul nsw i32 %6, 2 %7 = load i32, i32* @i, align 4 %8 = load i32, i32* @b, align 4 - %div = sdiv i32 %7, %8 + %div = sdiv nof i32 %7, %8 %9 = load i32, i32* @c, align 4 %10 = load i32, i32* @d, align 4 %11 = load i32, i32* @e, align 4 Index: test/CodeGen/X86/avx512-bugfix-23634.ll =================================================================== --- test/CodeGen/X86/avx512-bugfix-23634.ll +++ test/CodeGen/X86/avx512-bugfix-23634.ll @@ -32,7 +32,7 @@ 
%b_to_int32_broadcast = shufflevector <16 x i32> %b_to_int32_broadcast_init, <16 x i32> undef, <16 x i32> zeroinitializer %a_load_to_int32 = fptosi <16 x float> %ptr_masked_load.39 to <16 x i32> - %div_v019_load_ = sdiv <16 x i32> %b_to_int32_broadcast, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2> + %div_v019_load_ = sdiv nof <16 x i32> %b_to_int32_broadcast, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2> %v1.i = select <16 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>, <16 x i32> %a_load_to_int32 Index: test/CodeGen/X86/bypass-slow-division-32.ll =================================================================== --- test/CodeGen/X86/bypass-slow-division-32.ll +++ test/CodeGen/X86/bypass-slow-division-32.ll @@ -21,7 +21,7 @@ ; CHECK-NEXT: divb %cl ; CHECK-NEXT: movzbl %al, %eax ; CHECK-NEXT: retl - %result = sdiv i32 %a, %b + %result = sdiv nof i32 %a, %b ret i32 %result } @@ -71,7 +71,7 @@ ; CHECK-NEXT: movzbl %al, %eax ; CHECK-NEXT: addl %edx, %eax ; CHECK-NEXT: retl - %resultdiv = sdiv i32 %a, %b + %resultdiv = sdiv nof i32 %a, %b %resultrem = srem i32 %a, %b %result = add i32 %resultdiv, %resultrem ret i32 %result @@ -120,8 +120,8 @@ ; CHECK-NEXT: popl %edi ; CHECK-NEXT: popl %ebx ; CHECK-NEXT: retl - %resultidiv = sdiv i32 %a, %b - %resultdiv = udiv i32 %a, %b + %resultidiv = sdiv nof i32 %a, %b + %resultdiv = udiv nof i32 %a, %b %result = add i32 %resultidiv, %resultdiv ret i32 %result } @@ -131,7 +131,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: movl $64, %eax ; CHECK-NEXT: retl - %resultdiv = sdiv i32 256, 4 + %resultdiv = sdiv nof i32 256, 4 ret i32 %resultdiv } @@ -145,7 +145,7 @@ ; 
CHECK-NEXT: sarl $3, %edx ; CHECK-NEXT: leal (%edx,%eax), %eax ; CHECK-NEXT: retl - %resultdiv = sdiv i32 %a, 33 + %resultdiv = sdiv nof i32 %a, 33 ret i32 %resultdiv } @@ -188,7 +188,7 @@ ; CHECK-NEXT: addl %edx, %ecx ; CHECK-NEXT: movl %ecx, %eax ; CHECK-NEXT: retl - %resultdiv = sdiv i32 %a, 33 + %resultdiv = sdiv nof i32 %a, 33 %resultrem = srem i32 %a, 33 %result = add i32 %resultdiv, %resultrem ret i32 %result @@ -212,7 +212,7 @@ ; CHECK-NEXT: divb %cl ; CHECK-NEXT: movzbl %al, %eax ; CHECK-NEXT: retl - %resultdiv = sdiv i32 4, %a + %resultdiv = sdiv nof i32 4, %a ret i32 %resultdiv } @@ -234,6 +234,6 @@ ; CHECK-NEXT: divb %cl ; CHECK-NEXT: movzbl %al, %eax ; CHECK-NEXT: retl - %resultdiv = sdiv i32 4, %a + %resultdiv = sdiv nof i32 4, %a ret i32 %resultdiv } Index: test/CodeGen/X86/bypass-slow-division-64.ll =================================================================== --- test/CodeGen/X86/bypass-slow-division-64.ll +++ test/CodeGen/X86/bypass-slow-division-64.ll @@ -22,7 +22,7 @@ ; CHECK-NEXT: divl %esi ; CHECK-NEXT: # kill: def %eax killed %eax def %rax ; CHECK-NEXT: retq - %result = sdiv i64 %a, %b + %result = sdiv nof i64 %a, %b ret i64 %result } @@ -71,7 +71,7 @@ ; CHECK-NEXT: # kill: def %eax killed %eax def %rax ; CHECK-NEXT: addq %rdx, %rax ; CHECK-NEXT: retq - %resultdiv = sdiv i64 %a, %b + %resultdiv = sdiv nof i64 %a, %b %resultrem = srem i64 %a, %b %result = add i64 %resultdiv, %resultrem ret i64 %result Index: test/CodeGen/X86/bypass-slow-division-tune.ll =================================================================== --- test/CodeGen/X86/bypass-slow-division-tune.ll +++ test/CodeGen/X86/bypass-slow-division-tune.ll @@ -15,7 +15,7 @@ ; REST-LABEL: div32: ; REST-NOT: divb ; - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b ret i32 %div } @@ -27,7 +27,7 @@ ; CHECK: shrq $32, [[REG]] ; CHECK: divl ; - %div = sdiv i64 %a, %b + %div = sdiv nof i64 %a, %b ret i64 %div } @@ -38,7 +38,7 @@ ; CHECK-LABEL: div64_optsize: ; CHECK-NOT: divl ; 
CHECK: ret - %div = sdiv i64 %a, %b + %div = sdiv nof i64 %a, %b ret i64 %div } @@ -46,7 +46,7 @@ ; HUGEWS-LABEL: div64_hugews: ; HUGEWS-NOT: divl ; HUGEWS: ret - %div = sdiv i64 %a, %b + %div = sdiv nof i64 %a, %b ret i64 %div } @@ -54,7 +54,7 @@ ; CHECK-LABEL: div32_optsize: ; CHECK-NOT: divb ; CHECK: ret - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b ret i32 %div } @@ -62,7 +62,7 @@ ; CHECK-LABEL: div32_minsize: ; CHECK-NOT: divb ; CHECK: ret - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b ret i32 %div } Index: test/CodeGen/X86/coalescer-identity.ll =================================================================== --- test/CodeGen/X86/coalescer-identity.ll +++ test/CodeGen/X86/coalescer-identity.ll @@ -36,7 +36,7 @@ %3 = phi i32 [ %1, %for.body.us ], [ %4, %cond.end.us ] %dec12.us = phi i16 [ %add.us, %for.body.us ], [ %dec.us, %cond.end.us ] %inc.us = add i32 %i.011.us, 1 - %phitmp.us = udiv i32 %v.010.us, 12 + %phitmp.us = udiv nof i32 %v.010.us, 12 %tobool.us = icmp eq i32 %inc.us, 0 br i1 %tobool.us, label %for.end, label %for.body.us @@ -67,7 +67,7 @@ %dec = sext i1 %not.tobool5 to i16 %dec12 = add i16 %add, %dec %inc = add i32 %i.011, 1 - %phitmp = udiv i32 %v.010, 12 + %phitmp = udiv nof i32 %v.010, 12 %tobool = icmp eq i32 %inc, 0 br i1 %tobool, label %for.end, label %for.body Index: test/CodeGen/X86/combine-sdiv.ll =================================================================== --- test/CodeGen/X86/combine-sdiv.ll +++ test/CodeGen/X86/combine-sdiv.ll @@ -3,13 +3,13 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2 -; fold (sdiv undef, x) -> 0 +; fold (sdiv nof undef, x) -> 0 define i32 @combine_sdiv_undef0(i32 %x) { ; CHECK-LABEL: combine_sdiv_undef0: ; CHECK: # %bb.0: ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: 
retq - %1 = sdiv i32 undef, %x + %1 = sdiv nof i32 undef, %x ret i32 %1 } @@ -17,16 +17,16 @@ ; CHECK-LABEL: combine_vec_sdiv_undef0: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %1 = sdiv <4 x i32> undef, %x + %1 = sdiv nof <4 x i32> undef, %x ret <4 x i32> %1 } -; fold (sdiv x, undef) -> undef +; fold (sdiv nof x, undef) -> undef define i32 @combine_sdiv_undef1(i32 %x) { ; CHECK-LABEL: combine_sdiv_undef1: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %1 = sdiv i32 %x, undef + %1 = sdiv nof i32 %x, undef ret i32 %1 } @@ -34,17 +34,17 @@ ; CHECK-LABEL: combine_vec_sdiv_undef1: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %1 = sdiv <4 x i32> %x, undef + %1 = sdiv nof <4 x i32> %x, undef ret <4 x i32> %1 } -; fold (sdiv x, 1) -> x +; fold (sdiv nof x, 1) -> x define i32 @combine_sdiv_by_one(i32 %x) { ; CHECK-LABEL: combine_sdiv_by_one: ; CHECK: # %bb.0: ; CHECK-NEXT: movl %edi, %eax ; CHECK-NEXT: retq - %1 = sdiv i32 %x, 1 + %1 = sdiv nof i32 %x, 1 ret i32 %1 } @@ -52,18 +52,18 @@ ; CHECK-LABEL: combine_vec_sdiv_by_one: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %1 = sdiv <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1> + %1 = sdiv nof <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1> ret <4 x i32> %1 } -; fold (sdiv x, -1) -> 0 - x +; fold (sdiv nof x, -1) -> 0 - x define i32 @combine_sdiv_by_negone(i32 %x) { ; CHECK-LABEL: combine_sdiv_by_negone: ; CHECK: # %bb.0: ; CHECK-NEXT: negl %edi ; CHECK-NEXT: movl %edi, %eax ; CHECK-NEXT: retq - %1 = sdiv i32 %x, -1 + %1 = sdiv nof i32 %x, -1 ret i32 %1 } @@ -80,11 +80,11 @@ ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq - %1 = sdiv <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1> + %1 = sdiv nof <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1> ret <4 x i32> %1 } -; TODO fold (sdiv x, x) -> 1 +; TODO fold (sdiv nof x, x) -> 1 define i32 @combine_sdiv_dupe(i32 %x) { ; CHECK-LABEL: combine_sdiv_dupe: ; CHECK: # %bb.0: @@ -92,7 +92,7 @@ ; CHECK-NEXT: cltd ; CHECK-NEXT: idivl %edi ; CHECK-NEXT: retq - %1 = sdiv i32 %x, 
%x + %1 = sdiv nof i32 %x, %x ret i32 %1 } @@ -147,11 +147,11 @@ ; AVX-NEXT: idivl %ecx ; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0 ; AVX-NEXT: retq - %1 = sdiv <4 x i32> %x, %x + %1 = sdiv nof <4 x i32> %x, %x ret <4 x i32> %1 } -; fold (sdiv x, y) -> (udiv x, y) iff x and y are positive +; fold (sdiv nof x, y) -> (udiv nof x, y) iff x and y are positive define <4 x i32> @combine_vec_sdiv_by_pos0(<4 x i32> %x) { ; SSE-LABEL: combine_vec_sdiv_by_pos0: ; SSE: # %bb.0: @@ -165,7 +165,7 @@ ; AVX-NEXT: vpsrld $2, %xmm0, %xmm0 ; AVX-NEXT: retq %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255> - %2 = sdiv <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4> + %2 = sdiv nof <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4> ret <4 x i32> %2 } @@ -201,11 +201,11 @@ ; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 ; AVX2-NEXT: retq %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255> - %2 = sdiv <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16> + %2 = sdiv nof <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16> ret <4 x i32> %2 } -; fold (sdiv x, (1 << c)) -> x >>u c +; fold (sdiv nof x, (1 << c)) -> x >>u c define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) { ; SSE-LABEL: combine_vec_sdiv_by_pow2a: ; SSE: # %bb.0: @@ -224,7 +224,7 @@ ; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vpsrad $2, %xmm0, %xmm0 ; AVX-NEXT: retq - %1 = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4> + %1 = sdiv nof <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4> ret <4 x i32> %1 } @@ -278,6 +278,6 @@ ; AVX-NEXT: sarl $4, %ecx ; AVX-NEXT: vpinsrd $3, %ecx, %xmm1, %xmm0 ; AVX-NEXT: retq - %1 = sdiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16> + %1 = sdiv nof <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16> ret <4 x i32> %1 } Index: test/CodeGen/X86/combine-udiv.ll =================================================================== --- test/CodeGen/X86/combine-udiv.ll +++ test/CodeGen/X86/combine-udiv.ll @@ -3,13 +3,13 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK 
--check-prefix=AVX --check-prefix=AVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2 -; fold (udiv undef, x) -> 0 +; fold (udiv nof undef, x) -> 0 define i32 @combine_udiv_undef0(i32 %x) { ; CHECK-LABEL: combine_udiv_undef0: ; CHECK: # %bb.0: ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: retq - %1 = udiv i32 undef, %x + %1 = udiv nof i32 undef, %x ret i32 %1 } @@ -17,16 +17,16 @@ ; CHECK-LABEL: combine_vec_udiv_undef0: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %1 = udiv <4 x i32> undef, %x + %1 = udiv nof <4 x i32> undef, %x ret <4 x i32> %1 } -; fold (udiv x, undef) -> undef +; fold (udiv nof x, undef) -> undef define i32 @combine_udiv_undef1(i32 %x) { ; CHECK-LABEL: combine_udiv_undef1: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %1 = udiv i32 %x, undef + %1 = udiv nof i32 %x, undef ret i32 %1 } @@ -34,17 +34,17 @@ ; CHECK-LABEL: combine_vec_udiv_undef1: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %1 = udiv <4 x i32> %x, undef + %1 = udiv nof <4 x i32> %x, undef ret <4 x i32> %1 } -; fold (udiv x, 1) -> x +; fold (udiv nof x, 1) -> x define i32 @combine_udiv_by_one(i32 %x) { ; CHECK-LABEL: combine_udiv_by_one: ; CHECK: # %bb.0: ; CHECK-NEXT: movl %edi, %eax ; CHECK-NEXT: retq - %1 = udiv i32 %x, 1 + %1 = udiv nof i32 %x, 1 ret i32 %1 } @@ -52,11 +52,11 @@ ; CHECK-LABEL: combine_vec_udiv_by_one: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %1 = udiv <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1> + %1 = udiv nof <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1> ret <4 x i32> %1 } -; TODO fold (udiv x, x) -> 1 +; TODO fold (udiv nof x, x) -> 1 define i32 @combine_udiv_dupe(i32 %x) { ; CHECK-LABEL: combine_udiv_dupe: ; CHECK: # %bb.0: @@ -64,7 +64,7 @@ ; CHECK-NEXT: movl %edi, %eax ; CHECK-NEXT: divl %edi ; CHECK-NEXT: retq - %1 = udiv i32 %x, %x + %1 = udiv nof i32 %x, %x ret i32 %1 } @@ -111,11 +111,11 @@ ; AVX-NEXT: divl %eax ; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0 ; AVX-NEXT: retq - %1 = udiv <4 x i32> 
%x, %x + %1 = udiv nof <4 x i32> %x, %x ret <4 x i32> %1 } -; fold (udiv x, (1 << c)) -> x >>u c +; fold (udiv nof x, (1 << c)) -> x >>u c define <4 x i32> @combine_vec_udiv_by_pow2a(<4 x i32> %x) { ; SSE-LABEL: combine_vec_udiv_by_pow2a: ; SSE: # %bb.0: @@ -126,7 +126,7 @@ ; AVX: # %bb.0: ; AVX-NEXT: vpsrld $2, %xmm0, %xmm0 ; AVX-NEXT: retq - %1 = udiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4> + %1 = udiv nof <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4> ret <4 x i32> %1 } @@ -158,7 +158,7 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 ; AVX2-NEXT: retq - %1 = udiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16> + %1 = udiv nof <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16> ret <4 x i32> %1 } @@ -205,11 +205,11 @@ ; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq %1 = shl <4 x i32> <i32 1, i32 1, i32 1, i32 1>, %y - %2 = udiv <4 x i32> %x, %1 + %2 = udiv nof <4 x i32> %x, %1 ret <4 x i32> %2 } -; fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2 +; fold (udiv nof x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2 define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) { ; SSE-LABEL: combine_vec_udiv_by_shl_pow2a: ; SSE: # %bb.0: @@ -257,7 +257,7 @@ ; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq %1 = shl <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %y - %2 = udiv <4 x i32> %x, %1 + %2 = udiv nof <4 x i32> %x, %1 ret <4 x i32> %2 } @@ -307,6 +307,6 @@ ; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq %1 = shl <4 x i32> <i32 1, i32 4, i32 8, i32 16>, %y - %2 = udiv <4 x i32> %x, %1 + %2 = udiv nof <4 x i32> %x, %1 ret <4 x i32> %2 } Index: test/CodeGen/X86/crash-O0.ll =================================================================== --- test/CodeGen/X86/crash-O0.ll +++ test/CodeGen/X86/crash-O0.ll @@ -11,7 +11,7 @@ define i32 @div8() nounwind { entry: %0 = trunc i64 undef to i8 ; <i8> [#uses=3] - %1 = udiv i8 0, %0 ; <i8> [#uses=1] + %1 = udiv nof i8 0, %0 ; <i8> [#uses=1] %2 = urem i8 
0, %0 ; <i8> [#uses=1] %3 = icmp uge i8 %2, %0 ; <i1> [#uses=1] br i1 %3, label %"40", label %"39" @@ -30,7 +30,7 @@ unreachable } -; When using fast isel, sdiv is lowered into a sequence of CQO + DIV64. +; When using fast isel, sdiv nof is lowered into a sequence of CQO + DIV64. ; CQO defines implicitly AX and DIV64 uses it implicitly too. ; When an instruction gets between those two, RegAllocFast was reusing ; AX for the vreg defined in between and the compiler crashed. @@ -46,6 +46,6 @@ define i64 @addressModeWith32bitIndex(i32 %V) { %gep = getelementptr i64, i64* null, i32 %V %load = load i64, i64* %gep - %sdiv = sdiv i64 0, %load + %sdiv = sdiv nof i64 0, %load ret i64 %sdiv } Index: test/CodeGen/X86/crash-lre-eliminate-dead-def.ll =================================================================== --- test/CodeGen/X86/crash-lre-eliminate-dead-def.ll +++ test/CodeGen/X86/crash-lre-eliminate-dead-def.ll @@ -86,7 +86,7 @@ %tmp3 = load i32, i32* @g, align 4 %tmp4 = load i16, i16* @a, align 2 %conv = sext i16 %tmp4 to i32 - %div = sdiv i32 %tmp3, %conv + %div = sdiv nof i32 %tmp3, %conv %tobool12 = icmp eq i32 %div, 0 br i1 %tobool12, label %for.cond15, label %L5 Index: test/CodeGen/X86/crash.ll =================================================================== --- test/CodeGen/X86/crash.ll +++ test/CodeGen/X86/crash.ll @@ -86,7 +86,7 @@ %5 = add nsw i32 %1, 256 ; <i32> [#uses=1] %storemerge.i.i57 = select i1 %4, i32 %5, i32 %1 ; <i32> [#uses=1] %6 = shl i32 %storemerge.i.i57, 16 ; <i32> [#uses=1] - %7 = sdiv i32 %6, -256 ; <i32> [#uses=1] + %7 = sdiv nof i32 %6, -256 ; <i32> [#uses=1] %8 = trunc i32 %7 to i8 ; <i8> [#uses=1] store i8 %8, i8* undef, align 1 ret void @@ -136,7 +136,7 @@ ; PR7540 define void @copy8bitregs() nounwind { entry: - %div.i = sdiv i32 115200, 0 + %div.i = sdiv nof i32 115200, 0 %shr8.i = lshr i32 %div.i, 8 %conv4.i = trunc i32 %shr8.i to i8 call void asm sideeffect "outb $0, ${1:w}", "{ax},N{dx},~{dirflag},~{fpsr},~{flags}"(i8 %conv4.i, 
i32 1017) nounwind Index: test/CodeGen/X86/div-rem-simplify.ll =================================================================== --- test/CodeGen/X86/div-rem-simplify.ll +++ test/CodeGen/X86/div-rem-simplify.ll @@ -23,7 +23,7 @@ ; CHECK-LABEL: sdiv0: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %div = sdiv i32 %x, 0 + %div = sdiv nof i32 %x, 0 ret i32 %div } @@ -31,7 +31,7 @@ ; CHECK-LABEL: udiv0: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %div = udiv i32 %x, 0 + %div = udiv nof i32 %x, 0 ret i32 %div } @@ -57,7 +57,7 @@ ; CHECK-LABEL: sdiv_vec0: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %div = sdiv <4 x i32> %x, zeroinitializer + %div = sdiv nof <4 x i32> %x, zeroinitializer ret <4 x i32> %div } @@ -65,7 +65,7 @@ ; CHECK-LABEL: udiv_vec0: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %div = udiv <4 x i32> %x, zeroinitializer + %div = udiv nof <4 x i32> %x, zeroinitializer ret <4 x i32> %div } @@ -95,7 +95,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: retq %sel = select i1 %cond, i32 23, i32 234 - %div = udiv i32 %sel, 0 + %div = udiv nof i32 %sel, 0 ret i32 %div } @@ -104,7 +104,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: retq %sel = select i1 %cond, i32 23, i32 234 - %div = sdiv i32 %sel, 0 + %div = sdiv nof i32 %sel, 0 ret i32 %div } @@ -134,7 +134,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: retq %sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14> - %div = udiv <4 x i32> %sel, zeroinitializer + %div = udiv nof <4 x i32> %sel, zeroinitializer ret <4 x i32> %div } @@ -143,7 +143,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: retq %sel = select i1 %cond, <4 x i32> <i32 -1, i32 0, i32 1, i32 2>, <4 x i32> <i32 11, i32 12, i32 13, i32 14> - %div = sdiv <4 x i32> %sel, zeroinitializer + %div = sdiv nof <4 x i32> %sel, zeroinitializer ret <4 x i32> %div } @@ -155,7 +155,7 @@ ; CHECK-NEXT: retq %zero = and <4 x i32> %x, <i32 0, i32 0, i32 0, i32 0> %some_ones = or <4 x i32> %zero, <i32 0, i32 -1, i32 0, i32 3> - %div = sdiv <4 x i32> <i32 -11, i32 -12, i32 -13, i32 
-14>, %some_ones + %div = sdiv nof <4 x i32> <i32 -11, i32 -12, i32 -13, i32 -14>, %some_ones ret <4 x i32> %div } @@ -163,7 +163,7 @@ ; CHECK-LABEL: udiv0elt_vec: ; CHECK: # %bb.0: ; CHECK-NEXT: retq - %div = udiv <4 x i32> <i32 11, i32 12, i32 13, i32 14>, <i32 0, i32 3, i32 4, i32 0> + %div = udiv nof <4 x i32> <i32 11, i32 12, i32 13, i32 14>, <i32 0, i32 3, i32 4, i32 0> ret <4 x i32> %div } Index: test/CodeGen/X86/div8.ll =================================================================== --- test/CodeGen/X86/div8.ll +++ test/CodeGen/X86/div8.ll @@ -15,7 +15,7 @@ ; Insist on i8->i32 zero extension, even though divb demands only i16: ; CHECK: movzbl {{.*}}%eax ; CHECK: divb - %div = udiv i8 %tmp, %tmp1 + %div = udiv nof i8 %tmp, %tmp1 store i8 %div, i8* %quotient, align 1 %tmp4 = load i8, i8* %quotient, align 1 ret i8 %tmp4 Index: test/CodeGen/X86/divide-by-constant.ll =================================================================== --- test/CodeGen/X86/divide-by-constant.ll +++ test/CodeGen/X86/divide-by-constant.ll @@ -18,7 +18,7 @@ ; X64-NEXT: # kill: def %ax killed %ax killed %eax ; X64-NEXT: retq entry: - %div = udiv i16 %x, 33 + %div = udiv nof i16 %x, 33 ret i16 %div } @@ -38,7 +38,7 @@ ; X64-NEXT: # kill: def %ax killed %ax killed %eax ; X64-NEXT: retq entry: - %div = udiv i16 %c, 3 + %div = udiv nof i16 %c, 3 ret i16 %div } @@ -61,7 +61,7 @@ ; X64-NEXT: # kill: def %al killed %al killed %eax ; X64-NEXT: retq entry: - %div = udiv i8 %c, 3 + %div = udiv nof i8 %c, 3 ret i8 %div } @@ -87,7 +87,7 @@ ; X64-NEXT: # kill: def %ax killed %ax killed %eax ; X64-NEXT: retq entry: - %div = sdiv i16 %x, 33 ; <i32> [#uses=1] + %div = sdiv nof i16 %x, 33 ; <i32> [#uses=1] ret i16 %div } @@ -107,7 +107,7 @@ ; X64-NEXT: shrq $59, %rax ; X64-NEXT: # kill: def %eax killed %eax killed %rax ; X64-NEXT: retq - %tmp1 = udiv i32 %A, 1577682821 ; <i32> [#uses=1] + %tmp1 = udiv nof i32 %A, 1577682821 ; <i32> [#uses=1] ret i32 %tmp1 } @@ -133,7 +133,7 @@ ; X64-NEXT: # kill: 
def %ax killed %ax killed %eax ; X64-NEXT: retq entry: - %div = sdiv i16 %x, 10 + %div = sdiv nof i16 %x, 10 ret i16 %div } @@ -155,7 +155,7 @@ ; X64-NEXT: shrq $32, %rax ; X64-NEXT: # kill: def %eax killed %eax killed %rax ; X64-NEXT: retq - %div = udiv i32 %x, 28 + %div = udiv nof i32 %x, 28 ret i32 %div } @@ -181,7 +181,7 @@ ; X64-NEXT: movzwl %ax, %eax ; X64-NEXT: # kill: def %al killed %al killed %eax ; X64-NEXT: retq - %div = udiv i8 %x, 78 + %div = udiv nof i8 %x, 78 ret i8 %div } @@ -206,7 +206,7 @@ ; X64-NEXT: movzwl %ax, %eax ; X64-NEXT: # kill: def %al killed %al killed %eax ; X64-NEXT: retq - %div = udiv i8 %x, 116 + %div = udiv nof i8 %x, 116 ret i8 %div } @@ -229,7 +229,7 @@ ; X64-NEXT: idivl %ecx ; X64-NEXT: retq entry: - %div = sdiv i32 %x, 32 + %div = sdiv nof i32 %x, 32 ret i32 %div } @@ -252,7 +252,7 @@ ; X64-NEXT: idivl %ecx ; X64-NEXT: retq entry: - %div = sdiv i32 %x, 33 + %div = sdiv nof i32 %x, 33 ret i32 %div } @@ -269,7 +269,7 @@ ; X64-NEXT: movl %edi, %eax ; X64-NEXT: retq entry: - %div = udiv i32 %x, 32 + %div = udiv nof i32 %x, 32 ret i32 %div } @@ -292,7 +292,7 @@ ; X64-NEXT: divl %ecx ; X64-NEXT: retq entry: - %div = udiv i32 %x, 33 + %div = udiv nof i32 %x, 33 ret i32 %div } @@ -333,6 +333,6 @@ ; X64-NEXT: retq entry: %rem = urem i64 %x, 12345 - %div = udiv i64 %rem, 7 + %div = udiv nof i64 %rem, 7 ret i64 %div } Index: test/CodeGen/X86/divide-windows-itanium.ll =================================================================== --- test/CodeGen/X86/divide-windows-itanium.ll +++ test/CodeGen/X86/divide-windows-itanium.ll @@ -1,7 +1,7 @@ ; RUN: llc -mtriple i686-windows-itanium -filetype asm -o - %s | FileCheck %s define i64 @f(i64 %i, i64 %j) { - %1 = sdiv i64 %i, %j + %1 = sdiv nof i64 %i, %j ret i64 %1 } @@ -10,7 +10,7 @@ ; CHECK: calll __alldiv define i64 @g(i64 %i, i64 %j) { - %1 = udiv i64 %i, %j + %1 = udiv nof i64 %i, %j ret i64 %1 } Index: test/CodeGen/X86/divrem.ll 
=================================================================== --- test/CodeGen/X86/divrem.ll +++ test/CodeGen/X86/divrem.ll @@ -46,7 +46,7 @@ ; X64-NEXT: movq %rax, (%r8) ; X64-NEXT: movq %rdx, (%rcx) ; X64-NEXT: retq - %r = sdiv i64 %x, %y + %r = sdiv nof i64 %x, %y %t = srem i64 %x, %y store i64 %r, i64* %p store i64 %t, i64* %q @@ -76,7 +76,7 @@ ; X64-NEXT: movl %eax, (%r8) ; X64-NEXT: movl %edx, (%rcx) ; X64-NEXT: retq - %r = sdiv i32 %x, %y + %r = sdiv nof i32 %x, %y %t = srem i32 %x, %y store i32 %r, i32* %p store i32 %t, i32* %q @@ -106,7 +106,7 @@ ; X64-NEXT: movw %ax, (%r8) ; X64-NEXT: movw %dx, (%rcx) ; X64-NEXT: retq - %r = sdiv i16 %x, %y + %r = sdiv nof i16 %x, %y %t = srem i16 %x, %y store i16 %r, i16* %p store i16 %t, i16* %q @@ -137,7 +137,7 @@ ; X64-NEXT: movb %al, (%rdx) ; X64-NEXT: movb %sil, (%rcx) ; X64-NEXT: retq - %r = sdiv i8 %x, %y + %r = sdiv nof i8 %x, %y %t = srem i8 %x, %y store i8 %r, i8* %p store i8 %t, i8* %q @@ -188,7 +188,7 @@ ; X64-NEXT: movq %rax, (%r8) ; X64-NEXT: movq %rdx, (%rcx) ; X64-NEXT: retq - %r = udiv i64 %x, %y + %r = udiv nof i64 %x, %y %t = urem i64 %x, %y store i64 %r, i64* %p store i64 %t, i64* %q @@ -218,7 +218,7 @@ ; X64-NEXT: movl %eax, (%r8) ; X64-NEXT: movl %edx, (%rcx) ; X64-NEXT: retq - %r = udiv i32 %x, %y + %r = udiv nof i32 %x, %y %t = urem i32 %x, %y store i32 %r, i32* %p store i32 %t, i32* %q @@ -248,7 +248,7 @@ ; X64-NEXT: movw %ax, (%r8) ; X64-NEXT: movw %dx, (%rcx) ; X64-NEXT: retq - %r = udiv i16 %x, %y + %r = udiv nof i16 %x, %y %t = urem i16 %x, %y store i16 %r, i16* %p store i16 %t, i16* %q @@ -279,7 +279,7 @@ ; X64-NEXT: movb %al, (%rdx) ; X64-NEXT: movb %sil, (%rcx) ; X64-NEXT: retq - %r = udiv i8 %x, %y + %r = udiv nof i8 %x, %y %t = urem i8 %x, %y store i8 %r, i8* %p store i8 %t, i8* %q Index: test/CodeGen/X86/divrem8_ext.ll =================================================================== --- test/CodeGen/X86/divrem8_ext.ll +++ test/CodeGen/X86/divrem8_ext.ll @@ -22,7 +22,7 @@ ; 
X64-NEXT: movb %al, {{.*}}(%rip) ; X64-NEXT: movl %ecx, %eax ; X64-NEXT: retq - %div = udiv i8 %x, %y + %div = udiv nof i8 %x, %y store i8 %div, i8* @z %1 = urem i8 %x, %y ret i8 %1 @@ -118,7 +118,7 @@ ; X64-NEXT: movb %al, {{.*}}(%rip) ; X64-NEXT: movl %ecx, %eax ; X64-NEXT: retq - %div = sdiv i8 %x, %y + %div = sdiv nof i8 %x, %y store i8 %div, i8* @z %1 = srem i8 %x, %y ret i8 %1 @@ -218,7 +218,7 @@ ; X64-NEXT: addq %rcx, %rax ; X64-NEXT: retq %r1 = urem i8 %a, %c - %d1 = udiv i8 %a, %c + %d1 = udiv nof i8 %a, %c %r2 = zext i8 %r1 to i64 %d2 = zext i8 %d1 to i64 %ret = add i64 %r2, %d2 Index: test/CodeGen/X86/early-ifcvt.ll =================================================================== --- test/CodeGen/X86/early-ifcvt.ll +++ test/CodeGen/X86/early-ifcvt.ll @@ -151,7 +151,7 @@ br i1 %1, label %4, label %2 ; <label>:2 ; preds = %0 - %3 = sdiv i32 %a, %b + %3 = sdiv nof i32 %a, %b br label %4 ; <label>:4 ; preds = %0, %2 @@ -166,7 +166,7 @@ br i1 %1, label %4, label %2 ; <label>:2 ; preds = %0 - %3 = udiv i32 %a, %b + %3 = udiv nof i32 %a, %b br label %4 ; <label>:4 ; preds = %0, %2 Index: test/CodeGen/X86/fast-isel-divrem-x86-64.ll =================================================================== --- test/CodeGen/X86/fast-isel-divrem-x86-64.ll +++ test/CodeGen/X86/fast-isel-divrem-x86-64.ll @@ -2,7 +2,7 @@ define i64 @test_sdiv64(i64 %dividend, i64 %divisor) nounwind { entry: - %result = sdiv i64 %dividend, %divisor + %result = sdiv nof i64 %dividend, %divisor ret i64 %result } @@ -22,7 +22,7 @@ define i64 @test_udiv64(i64 %dividend, i64 %divisor) nounwind { entry: - %result = udiv i64 %dividend, %divisor + %result = udiv nof i64 %dividend, %divisor ret i64 %result } Index: test/CodeGen/X86/fast-isel-divrem.ll =================================================================== --- test/CodeGen/X86/fast-isel-divrem.ll +++ test/CodeGen/X86/fast-isel-divrem.ll @@ -3,7 +3,7 @@ define i8 @test_sdiv8(i8 %dividend, i8 %divisor) nounwind { entry: - %result = sdiv 
i8 %dividend, %divisor + %result = sdiv nof i8 %dividend, %divisor ret i8 %result } @@ -23,7 +23,7 @@ define i8 @test_udiv8(i8 %dividend, i8 %divisor) nounwind { entry: - %result = udiv i8 %dividend, %divisor + %result = udiv nof i8 %dividend, %divisor ret i8 %result } @@ -43,7 +43,7 @@ define i16 @test_sdiv16(i16 %dividend, i16 %divisor) nounwind { entry: - %result = sdiv i16 %dividend, %divisor + %result = sdiv nof i16 %dividend, %divisor ret i16 %result } @@ -63,7 +63,7 @@ define i16 @test_udiv16(i16 %dividend, i16 %divisor) nounwind { entry: - %result = udiv i16 %dividend, %divisor + %result = udiv nof i16 %dividend, %divisor ret i16 %result } @@ -83,7 +83,7 @@ define i32 @test_sdiv32(i32 %dividend, i32 %divisor) nounwind { entry: - %result = sdiv i32 %dividend, %divisor + %result = sdiv nof i32 %dividend, %divisor ret i32 %result } @@ -103,7 +103,7 @@ define i32 @test_udiv32(i32 %dividend, i32 %divisor) nounwind { entry: - %result = udiv i32 %dividend, %divisor + %result = udiv nof i32 %dividend, %divisor ret i32 %result } Index: test/CodeGen/X86/fast-isel-x86-64.ll =================================================================== --- test/CodeGen/X86/fast-isel-x86-64.ll +++ test/CodeGen/X86/fast-isel-x86-64.ll @@ -114,16 +114,16 @@ ; CHECK: imulq $7, %rdi, %rax } -; rdar://9297011 - Don't reject udiv by a power of 2. +; rdar://9297011 - Don't reject udiv nof by a power of 2. 
define i32 @test10(i32 %X) nounwind { - %Y = udiv i32 %X, 8 + %Y = udiv nof i32 %X, 8 ret i32 %Y ; CHECK-LABEL: test10: ; CHECK: shrl $3, } define i32 @test11(i32 %X) nounwind { - %Y = sdiv exact i32 %X, 8 + %Y = sdiv exact nof i32 %X, 8 ret i32 %Y ; CHECK-LABEL: test11: ; CHECK: sarl $3, Index: test/CodeGen/X86/fold-vector-shuffle-crash.ll =================================================================== --- test/CodeGen/X86/fold-vector-shuffle-crash.ll +++ test/CodeGen/X86/fold-vector-shuffle-crash.ll @@ -170,7 +170,7 @@ %E103 = extractelement <8 x i16> %I22, i32 6 %Shuff104 = shufflevector <2 x double> %BC91, <2 x double> %BC91, <2 x i32> <i32 1, i32 3> %I105 = insertelement <8 x i64> %Shuff96, i64 198384, i32 7 - %B106 = sdiv <8 x i32> %B52, %I45 + %B106 = sdiv nof <8 x i32> %B52, %I45 %ZE107 = zext i16 0 to i32 %Sl108 = select i1 %E43, <16 x i8> %Shuff29, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> %Cmp109 = icmp slt <16 x i64> zeroinitializer, zeroinitializer Index: test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll =================================================================== --- test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll +++ test/CodeGen/X86/greedy_regalloc_bad_eviction_sequence.ll @@ -88,7 +88,7 @@ %add = add nsw i32 %x1.071, %x0.070 %sub = sub nsw i32 %x1.071, %x2.072 %mul = mul nsw i32 %x3.073, %x2.072 - %div = sdiv i32 %x3.073, %x4.074 + %div = sdiv nof i32 %x3.073, %x4.074 %add19 = add nsw i32 %x5.075, %x4.074 %sub20 = sub nsw i32 %x5.075, %x6.076 %add21 = add nsw i32 %x7.077, %x6.076 Index: test/CodeGen/X86/handle-move.ll =================================================================== --- test/CodeGen/X86/handle-move.ll +++ test/CodeGen/X86/handle-move.ll @@ -19,7 +19,7 @@ define i32 @f1(i32 %a, i32 %b, i32 %c) nounwind uwtable readnone ssp { entry: %y = add i32 %c, 1 - %x = udiv i32 %b, %a + %x = udiv nof i32 %b, %a %add = add nsw i32 %y, 
%x ret i32 %add } @@ -68,7 +68,7 @@ %x = sub i32 %a, %b %y = sub i32 %b, %c %z = sub i32 %c, %d - %r1 = udiv i32 %x, %y + %r1 = udiv nof i32 %x, %y %r2 = mul i32 %z, %r1 ret i32 %r2 } Index: test/CodeGen/X86/hoist-invariant-load.ll =================================================================== --- test/CodeGen/X86/hoist-invariant-load.ll +++ test/CodeGen/X86/hoist-invariant-load.ll @@ -77,7 +77,7 @@ %i = phi i32 [ 0, %entry ], [ %inc, %for.check ] %x1_load = load i32, i32* %x1, align 8, !invariant.load !0 %x2_load = load i32, i32* %x2, align 8, !invariant.load !0 - %x_quot = udiv i32 %x1_load, %x2_load + %x_quot = udiv nof i32 %x1_load, %x2_load %y_elem = getelementptr inbounds i32, i32* %y, i32 %i %y_load = load i32, i32* %y_elem, align 8 %y_plus = add i32 %x_quot, %y_load Index: test/CodeGen/X86/i128-sdiv.ll =================================================================== --- test/CodeGen/X86/i128-sdiv.ll +++ test/CodeGen/X86/i128-sdiv.ll @@ -5,20 +5,20 @@ define i128 @test1(i128 %x) { ; CHECK-LABEL: test1: ; CHECK-NOT: call - %tmp = sdiv i128 %x, 73786976294838206464 + %tmp = sdiv nof i128 %x, 73786976294838206464 ret i128 %tmp } define i128 @test2(i128 %x) { ; CHECK-LABEL: test2: ; CHECK-NOT: call - %tmp = sdiv i128 %x, -73786976294838206464 + %tmp = sdiv nof i128 %x, -73786976294838206464 ret i128 %tmp } define i128 @test3(i128 %x) { ; CHECK-LABEL: test3: ; CHECK: call - %tmp = sdiv i128 %x, -73786976294838206467 + %tmp = sdiv nof i128 %x, -73786976294838206467 ret i128 %tmp } Index: test/CodeGen/X86/known-bits-vector.ll =================================================================== --- test/CodeGen/X86/known-bits-vector.ll +++ test/CodeGen/X86/known-bits-vector.ll @@ -319,7 +319,7 @@ ; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; X64-NEXT: retq %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767> - %2 = udiv <4 x i32> %1, %a1 + %2 = udiv nof <4 x i32> %1, %a1 %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, 
i32 3> %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22> ret <4 x i32> %4 Index: test/CodeGen/X86/known-bits.ll =================================================================== --- test/CodeGen/X86/known-bits.ll +++ test/CodeGen/X86/known-bits.ll @@ -86,7 +86,7 @@ BB: %L5 = load i8, i8* %0 %Sl9 = select i1 true, i8 %L5, i8 undef - %B21 = udiv i8 %Sl9, -93 + %B21 = udiv nof i8 %Sl9, -93 br label %CF CF: ; preds = %CF246, %BB Index: test/CodeGen/X86/legalizedag_vec.ll =================================================================== --- test/CodeGen/X86/legalizedag_vec.ll +++ test/CodeGen/X86/legalizedag_vec.ll @@ -9,7 +9,7 @@ ; promoted. define <2 x i64> @test_long_div(<2 x i64> %num, <2 x i64> %div) { - %div.r = sdiv <2 x i64> %num, %div + %div.r = sdiv nof <2 x i64> %num, %div ret <2 x i64> %div.r } Index: test/CodeGen/X86/libcall-sret.ll =================================================================== --- test/CodeGen/X86/libcall-sret.ll +++ test/CodeGen/X86/libcall-sret.ll @@ -33,7 +33,7 @@ ; CHECK-DAG: movl [[RES1]], var+4 ; CHECK-DAG: movl [[RES2]], var+8 ; CHECK-DAG: movl [[RES3]], var+12 - %quot = udiv i128 %l, %r + %quot = udiv nof i128 %l, %r store i128 %quot, i128* @var ret void } Index: test/CodeGen/X86/lsr-normalization.ll =================================================================== --- test/CodeGen/X86/lsr-normalization.ll +++ test/CodeGen/X86/lsr-normalization.ll @@ -57,7 +57,7 @@ br i1 %tmp21, label %bb23, label %bb16 bb23: ; preds = %bb16 - %tmp24 = udiv i64 100, %tmp22 ; <i64> [#uses=1] + %tmp24 = udiv nof i64 100, %tmp22 ; <i64> [#uses=1] br label %bb25 bb25: ; preds = %bb25, %bb23 Index: test/CodeGen/X86/misched-new.ll =================================================================== --- test/CodeGen/X86/misched-new.ll +++ test/CodeGen/X86/misched-new.ll @@ -93,7 +93,7 @@ %tmp = load i8, i8* undef, align 1 %tmp6 = sub i8 0, %tmp %tmp7 = load i8, i8* undef, align 1 - %tmp8 = udiv i8 %tmp6, %tmp7 + %tmp8 = udiv nof i8 %tmp6, 
%tmp7 %tmp9 = zext i8 %tmp8 to i64 %tmp10 = load i8, i8* undef, align 1 %tmp11 = zext i8 %tmp10 to i64 Index: test/CodeGen/X86/optimize-max-0.ll =================================================================== --- test/CodeGen/X86/optimize-max-0.ll +++ test/CodeGen/X86/optimize-max-0.ll @@ -10,7 +10,7 @@ entry: %0 = mul i32 %x, %w %1 = mul i32 %x, %w - %2 = sdiv i32 %1, 4 + %2 = sdiv nof i32 %1, 4 %.sum2 = add i32 %2, %0 %cond = icmp eq i32 %d, 1 br i1 %cond, label %bb29, label %bb10.preheader @@ -72,9 +72,9 @@ br i1 true, label %bb.nph7, label %bb9 bb.nph5: ; preds = %bb18.loopexit - %14 = sdiv i32 %w, 2 + %14 = sdiv nof i32 %w, 2 %15 = icmp slt i32 %w, 2 - %16 = sdiv i32 %x, 2 + %16 = sdiv nof i32 %x, 2 br i1 %15, label %bb18.bb20_crit_edge.split, label %bb.nph5.split bb.nph5.split: ; preds = %bb.nph5 @@ -94,7 +94,7 @@ bb.nph3: ; preds = %bb13 %22 = add i32 %17, %0 %23 = add i32 %17, %.sum2 - %24 = sdiv i32 %w, 2 + %24 = sdiv nof i32 %w, 2 %tmp = icmp sgt i32 1, %24 %smax = select i1 %tmp, i32 1, i32 %24 br label %bb14 @@ -152,7 +152,7 @@ bb22: ; preds = %bb20 %37 = mul i32 %x, %w - %38 = sdiv i32 %37, 4 + %38 = sdiv nof i32 %37, 4 %.sum3 = add i32 %38, %.sum2 %39 = add i32 %x, 15 %40 = and i32 %39, -16 @@ -189,7 +189,7 @@ %.sum4 = add i32 %.sum3, %49 %50 = getelementptr i8, i8* %j, i32 %.sum4 %51 = mul i32 %x, %w - %52 = sdiv i32 %51, 2 + %52 = sdiv nof i32 %51, 2 tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i32 1, i1 false) ret void @@ -223,7 +223,7 @@ %60 = mul i32 %x, %w %61 = getelementptr i8, i8* %j, i32 %60 %62 = mul i32 %x, %w - %63 = sdiv i32 %62, 2 + %63 = sdiv nof i32 %62, 2 tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i32 1, i1 false) ret void @@ -235,7 +235,7 @@ entry: %0 = mul i32 %x, %w %1 = mul i32 %x, %w - %2 = udiv i32 %1, 4 + %2 = udiv nof i32 %1, 4 %.sum2 = add i32 %2, %0 %cond = icmp eq i32 %d, 1 br i1 %cond, label %bb29, label %bb10.preheader @@ -297,9 +297,9 @@ br i1 true, label %bb.nph7, label %bb9 
bb.nph5: ; preds = %bb18.loopexit - %14 = udiv i32 %w, 2 + %14 = udiv nof i32 %w, 2 %15 = icmp ult i32 %w, 2 - %16 = udiv i32 %x, 2 + %16 = udiv nof i32 %x, 2 br i1 %15, label %bb18.bb20_crit_edge.split, label %bb.nph5.split bb.nph5.split: ; preds = %bb.nph5 @@ -319,7 +319,7 @@ bb.nph3: ; preds = %bb13 %22 = add i32 %17, %0 %23 = add i32 %17, %.sum2 - %24 = udiv i32 %w, 2 + %24 = udiv nof i32 %w, 2 %tmp = icmp ugt i32 1, %24 %smax = select i1 %tmp, i32 1, i32 %24 br label %bb14 @@ -377,7 +377,7 @@ bb22: ; preds = %bb20 %37 = mul i32 %x, %w - %38 = udiv i32 %37, 4 + %38 = udiv nof i32 %37, 4 %.sum3 = add i32 %38, %.sum2 %39 = add i32 %x, 15 %40 = and i32 %39, -16 @@ -414,7 +414,7 @@ %.sum4 = add i32 %.sum3, %49 %50 = getelementptr i8, i8* %j, i32 %.sum4 %51 = mul i32 %x, %w - %52 = udiv i32 %51, 2 + %52 = udiv nof i32 %51, 2 tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i32 1, i1 false) ret void @@ -448,7 +448,7 @@ %60 = mul i32 %x, %w %61 = getelementptr i8, i8* %j, i32 %60 %62 = mul i32 %x, %w - %63 = udiv i32 %62, 2 + %63 = udiv nof i32 %62, 2 tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i32 1, i1 false) ret void Index: test/CodeGen/X86/phys_subreg_coalesce-2.ll =================================================================== --- test/CodeGen/X86/phys_subreg_coalesce-2.ll +++ test/CodeGen/X86/phys_subreg_coalesce-2.ll @@ -23,7 +23,7 @@ %divisor.02 = add i32 %indvar, 1 ; <i32> [#uses=2] %n.addr.03 = sub i32 %n, %indvar ; <i32> [#uses=1] %mul = mul i32 %n.addr.03, %accumulator.01 ; <i32> [#uses=1] - %div = udiv i32 %mul, %divisor.02 ; <i32> [#uses=2] + %div = udiv nof i32 %mul, %divisor.02 ; <i32> [#uses=2] %inc = add i32 %indvar, 2 ; <i32> [#uses=1] %cmp4 = icmp ugt i32 %inc, %k ; <i1> [#uses=1] br i1 %cmp4, label %afterfor, label %forbody Index: test/CodeGen/X86/pr14088.ll =================================================================== --- test/CodeGen/X86/pr14088.ll +++ test/CodeGen/X86/pr14088.ll @@ -9,7 +9,7 @@ store 
i16 %conv3, i16* %tm_year2 %sext = shl i32 %rem, 16 %conv5 = ashr exact i32 %sext, 16 - %div = sdiv i32 %conv5, 10 + %div = sdiv nof i32 %conv5, 10 %conv6 = trunc i32 %div to i8 store i8 %conv6, i8* %bar br label %return Index: test/CodeGen/X86/pr16807.ll =================================================================== --- test/CodeGen/X86/pr16807.ll +++ test/CodeGen/X86/pr16807.ll @@ -2,7 +2,7 @@ define <16 x i16> @f_fu(<16 x i16> %bf) { allocas: - %avg.i.i = sdiv <16 x i16> %bf, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4> + %avg.i.i = sdiv nof <16 x i16> %bf, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4> ret <16 x i16> %avg.i.i } Index: test/CodeGen/X86/pr2659.ll =================================================================== --- test/CodeGen/X86/pr2659.ll +++ test/CodeGen/X86/pr2659.ll @@ -35,7 +35,7 @@ %divisor.02 = add i32 %indvar, 1 ; <i32> [#uses=2] %n.addr.03 = sub i32 %n, %indvar ; <i32> [#uses=1] %mul = mul i32 %n.addr.03, %accumulator.01 ; <i32> [#uses=1] - %div = udiv i32 %mul, %divisor.02 ; <i32> [#uses=2] + %div = udiv nof i32 %mul, %divisor.02 ; <i32> [#uses=2] %inc = add i32 %indvar, 2 ; <i32> [#uses=1] %cmp4 = icmp ugt i32 %inc, %k ; <i1> [#uses=1] br i1 %cmp4, label %afterfor, label %forbody Index: test/CodeGen/X86/pr26870.ll =================================================================== --- test/CodeGen/X86/pr26870.ll +++ test/CodeGen/X86/pr26870.ll @@ -10,7 +10,7 @@ %1 = insertelement <2 x i64> undef, i64 %call.i, i32 0 %2 = insertelement <2 x i64> %1, i64 %call.i8, i32 1 %3 = add nsw <2 x i64> %2, <i64 7, i64 7> - %4 = sdiv <2 x i64> %3, <i64 8, i64 8> + %4 = sdiv nof <2 x i64> %3, <i64 8, i64 8> %5 = add nsw <2 x i64> %4, <i64 1, i64 1> %6 = load i32, i32* %getTypeAllocSize___trans_tmp_2.i, align 4 %7 = insertelement <2 x i32> undef, i32 %0, i32 0 Index: test/CodeGen/X86/pr30813.ll 
=================================================================== --- test/CodeGen/X86/pr30813.ll +++ test/CodeGen/X86/pr30813.ll @@ -16,7 +16,7 @@ %bf.cast18158 = sext i48 %load1 to i64 %conv18159 = trunc i64 %bf.cast18158 to i32 %conv18160 = sext i32 %conv18159 to i64 - %div18162 = udiv i64 %conv, %conv18160 + %div18162 = udiv nof i64 %conv, %conv18160 %and18163 = and i64 %conv18098, %div18162 %shr18164 = lshr i64 %and1, %and18163 %conv18165 = trunc i64 %shr18164 to i16 Index: test/CodeGen/X86/pr32282.ll =================================================================== --- test/CodeGen/X86/pr32282.ll +++ test/CodeGen/X86/pr32282.ll @@ -89,7 +89,7 @@ %18 = ashr i64 %4, %17 %19 = and i64 %18, 9223372036854775806 %20 = add nsw i64 7, %19 - %21 = sdiv i64 0, %20 + %21 = sdiv nof i64 0, %20 %22 = icmp ne i64 %21, 0 %23 = zext i1 %22 to i8 store i8 %23, i8* %1, align 1 Index: test/CodeGen/X86/pr32588.ll =================================================================== --- test/CodeGen/X86/pr32588.ll +++ test/CodeGen/X86/pr32588.ll @@ -20,7 +20,7 @@ %tobool4 = icmp ne i32 undef, 0 %2 = and i1 %tobool4, %tobool2 %sub = sext i1 %2 to i32 - %div = sdiv i32 %sub, 2 + %div = sdiv nof i32 %sub, 2 %add = add nsw i32 %div, %xor store i32 %add, i32* @d, align 4 ret void Index: test/CodeGen/X86/pr33396.ll =================================================================== --- test/CodeGen/X86/pr33396.ll +++ test/CodeGen/X86/pr33396.ll @@ -20,7 +20,7 @@ bb3: %tmp = phi i32 [ 60, %bb2 ], - [ sdiv (i32 60, i32 zext (i1 icmp eq (i8* getelementptr ([2 x i8], [2 x i8]* @global.1, i64 0, i64 1), + [ sdiv nof (i32 60, i32 zext (i1 icmp eq (i8* getelementptr ([2 x i8], [2 x i8]* @global.1, i64 0, i64 1), i8* getelementptr ([2 x i8], [2 x i8]* @global, i64 0, i64 1)) to i32)), %bb1 ] %tmp4 = icmp slt i8 %tinky, -4 br label %bb1 Index: test/CodeGen/X86/pr3366.ll =================================================================== --- test/CodeGen/X86/pr3366.ll +++ 
test/CodeGen/X86/pr3366.ll @@ -4,7 +4,7 @@ define void @_ada_c34002a() nounwind { entry: %0 = load i8, i8* null, align 1 - %1 = sdiv i8 90, %0 + %1 = sdiv nof i8 90, %0 %2 = icmp ne i8 %1, 3 %3 = zext i1 %2 to i8 %toBool449 = icmp ne i8 %3, 0 Index: test/CodeGen/X86/pr33828.ll =================================================================== --- test/CodeGen/X86/pr33828.ll +++ test/CodeGen/X86/pr33828.ll @@ -32,7 +32,7 @@ %conv7 = sext i8 %tmp1 to i32 %conv8 = zext i16 %phitmp to i32 %mul = shl nuw nsw i32 %conv8, 1 - %div9 = udiv i32 %mul, 71 + %div9 = udiv nof i32 %mul, 71 %sub = add nsw i32 %div9, -3 %shl = shl i32 1, %sub %neg = xor i32 %shl, -1 Index: test/CodeGen/X86/pr34080-2.ll =================================================================== --- test/CodeGen/X86/pr34080-2.ll +++ test/CodeGen/X86/pr34080-2.ll @@ -96,14 +96,14 @@ %11 = select i1 %9, i32 %10, i32 %6 %12 = sext i1 %9 to i32 %13 = add i32 %4, %12 - %14 = sdiv i32 %13, -100 - %15 = sdiv i32 %13, 400 + %14 = sdiv nof i32 %13, -100 + %15 = sdiv nof i32 %13, 400 %16 = mul i32 %13, 36525 %17 = add i32 %16, 172251900 - %18 = sdiv i32 %17, 100 + %18 = sdiv nof i32 %17, 100 %19 = mul i32 %11, 306001 %20 = add i32 %19, 306001 - %21 = sdiv i32 %20, 10000 + %21 = sdiv nof i32 %20, 10000 %22 = add i32 %8, 2 %23 = add i32 %22, %14 %24 = add i32 %23, %15 Index: test/CodeGen/X86/pr35636.ll =================================================================== --- test/CodeGen/X86/pr35636.ll +++ test/CodeGen/X86/pr35636.ll @@ -18,7 +18,7 @@ ; CHECK-NEXT: movb %al, (%rax) ; CHECK-NEXT: retq bb: - %tmp = udiv i64 %arg, 100000000000000 + %tmp = udiv nof i64 %arg, 100000000000000 %tmp1 = mul nuw nsw i64 %tmp, 281474977 %tmp2 = lshr i64 %tmp1, 20 %tmp3 = trunc i64 %tmp2 to i32 Index: test/CodeGen/X86/rem_crash.ll =================================================================== --- test/CodeGen/X86/rem_crash.ll +++ test/CodeGen/X86/rem_crash.ll @@ -3,7 +3,7 @@ define i8 @test_minsize_uu8(i8 %x) minsize optsize { 
entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -11,7 +11,7 @@ define i8 @test_minsize_ss8(i8 %x) minsize optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -19,7 +19,7 @@ define i8 @test_minsize_us8(i8 %x) minsize optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -27,7 +27,7 @@ define i8 @test_minsize_su8(i8 %x) minsize optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -35,7 +35,7 @@ define i16 @test_minsize_uu16(i16 %x) minsize optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -43,7 +43,7 @@ define i16 @test_minsize_ss16(i16 %x) minsize optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -51,7 +51,7 @@ define i16 @test_minsize_us16(i16 %x) minsize optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -59,7 +59,7 @@ define i16 @test_minsize_su16(i16 %x) minsize optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -67,7 +67,7 @@ define i32 @test_minsize_uu32(i32 %x) minsize optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -75,7 +75,7 @@ define i32 @test_minsize_ss32(i32 %x) minsize optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -83,7 +83,7 @@ define i32 @test_minsize_us32(i32 %x) minsize optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -91,7 +91,7 @@ define i32 
@test_minsize_su32(i32 %x) minsize optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -99,7 +99,7 @@ define i64 @test_minsize_uu64(i64 %x) minsize optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -107,7 +107,7 @@ define i64 @test_minsize_ss64(i64 %x) minsize optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -115,7 +115,7 @@ define i64 @test_minsize_us64(i64 %x) minsize optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -123,7 +123,7 @@ define i64 @test_minsize_su64(i64 %x) minsize optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -131,7 +131,7 @@ define i8 @test_uu8(i8 %x) optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -139,7 +139,7 @@ define i8 @test_ss8(i8 %x) optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -147,7 +147,7 @@ define i8 @test_us8(i8 %x) optsize { entry: - %0 = udiv i8 %x, 10 + %0 = udiv nof i8 %x, 10 %1 = srem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -155,7 +155,7 @@ define i8 @test_su8(i8 %x) optsize { entry: - %0 = sdiv i8 %x, 10 + %0 = sdiv nof i8 %x, 10 %1 = urem i8 %x, 10 %res = add i8 %0, %1 ret i8 %res @@ -163,7 +163,7 @@ define i16 @test_uu16(i16 %x) optsize { entry: - %0 = udiv i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -171,7 +171,7 @@ define i16 @test_ss16(i16 %x) optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -179,7 +179,7 @@ define i16 @test_us16(i16 %x) optsize { entry: - %0 = udiv 
i16 %x, 10 + %0 = udiv nof i16 %x, 10 %1 = srem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -187,7 +187,7 @@ define i16 @test_su16(i16 %x) optsize { entry: - %0 = sdiv i16 %x, 10 + %0 = sdiv nof i16 %x, 10 %1 = urem i16 %x, 10 %res = add i16 %0, %1 ret i16 %res @@ -195,7 +195,7 @@ define i32 @test_uu32(i32 %x) optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -203,7 +203,7 @@ define i32 @test_ss32(i32 %x) optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -211,7 +211,7 @@ define i32 @test_us32(i32 %x) optsize { entry: - %0 = udiv i32 %x, 10 + %0 = udiv nof i32 %x, 10 %1 = srem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -219,7 +219,7 @@ define i32 @test_su32(i32 %x) optsize { entry: - %0 = sdiv i32 %x, 10 + %0 = sdiv nof i32 %x, 10 %1 = urem i32 %x, 10 %res = add i32 %0, %1 ret i32 %res @@ -227,7 +227,7 @@ define i64 @test_uu64(i64 %x) optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -235,7 +235,7 @@ define i64 @test_ss64(i64 %x) optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -243,7 +243,7 @@ define i64 @test_us64(i64 %x) optsize { entry: - %0 = udiv i64 %x, 10 + %0 = udiv nof i64 %x, 10 %1 = srem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res @@ -251,7 +251,7 @@ define i64 @test_su64(i64 %x) optsize { entry: - %0 = sdiv i64 %x, 10 + %0 = sdiv nof i64 %x, 10 %1 = urem i64 %x, 10 %res = add i64 %0, %1 ret i64 %res Index: test/CodeGen/X86/scalar_widen_div.ll =================================================================== --- test/CodeGen/X86/scalar_widen_div.ll +++ test/CodeGen/X86/scalar_widen_div.ll @@ -48,7 +48,7 @@ %tmp7 = load i32, i32* %index %arrayidx8 = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %tmp6, i32 %tmp7 %tmp9 = load <2 x i32>, <2 
x i32> addrspace(1)* %arrayidx8 - %tmp10 = sdiv <2 x i32> %tmp5, %tmp9 + %tmp10 = sdiv nof <2 x i32> %tmp5, %tmp9 store <2 x i32> %tmp10, <2 x i32> addrspace(1)* %arrayidx ret void } @@ -72,7 +72,7 @@ ; CHECK-NEXT: movl %edi, %eax ; CHECK-NEXT: movl %esi, %edx ; CHECK-NEXT: retq - %div.r = sdiv <3 x i8> %num, %div + %div.r = sdiv nof <3 x i8> %num, %div ret <3 x i8> %div.r } @@ -95,7 +95,7 @@ ; CHECK-NEXT: movl %edi, %eax ; CHECK-NEXT: movl %esi, %edx ; CHECK-NEXT: retq - %div.r = udiv <3 x i8> %num, %div + %div.r = udiv nof <3 x i8> %num, %div ret <3 x i8> %div.r } @@ -139,7 +139,7 @@ ; CHECK-NEXT: pinsrw $3, %r9d, %xmm0 ; CHECK-NEXT: pinsrw $4, %r8d, %xmm0 ; CHECK-NEXT: retq - %div.r = sdiv <5 x i16> %num, %div + %div.r = sdiv nof <5 x i16> %num, %div ret <5 x i16> %div.r } @@ -173,7 +173,7 @@ ; CHECK-NEXT: pinsrd $3, %eax, %xmm2 ; CHECK-NEXT: movdqa %xmm2, %xmm0 ; CHECK-NEXT: retq - %div.r = udiv <4 x i16> %num, %div + %div.r = udiv nof <4 x i16> %num, %div ret <4 x i16> %div.r } @@ -199,7 +199,7 @@ ; CHECK-NEXT: pinsrd $1, %esi, %xmm0 ; CHECK-NEXT: pinsrd $2, %ecx, %xmm0 ; CHECK-NEXT: retq - %div.r = udiv <3 x i32> %num, %div + %div.r = udiv nof <3 x i32> %num, %div ret <3 x i32> %div.r } @@ -224,7 +224,7 @@ ; CHECK-NEXT: movq %rsi, %rdx ; CHECK-NEXT: movq %rdi, %rcx ; CHECK-NEXT: retq - %div.r = sdiv <3 x i64> %num, %div + %div.r = sdiv nof <3 x i64> %num, %div ret <3 x i64> %div.r } @@ -249,7 +249,7 @@ ; CHECK-NEXT: movq %rsi, %rdx ; CHECK-NEXT: movq %rdi, %rcx ; CHECK-NEXT: retq - %div.r = udiv <3 x i64> %num, %div + %div.r = udiv nof <3 x i64> %num, %div ret <3 x i64> %div.r } @@ -448,7 +448,7 @@ %tmp4 = load <3 x i32>, <3 x i32>* %arrayidx11 ; <<3 x i32>> [#uses=1] %arrayidx7 = getelementptr inbounds <3 x i32>, <3 x i32>* %old, i32 %i.014 %tmp8 = load <3 x i32>, <3 x i32>* %arrayidx7 ; <<3 x i32>> [#uses=1] - %div = sdiv <3 x i32> %tmp4, %tmp8 + %div = sdiv nof <3 x i32> %tmp4, %tmp8 store <3 x i32> %div, <3 x i32>* %arrayidx11 %inc = add nsw i32 %i.014, 1 
%exitcond = icmp eq i32 %inc, %n Index: test/CodeGen/X86/sdiv-exact.ll =================================================================== --- test/CodeGen/X86/sdiv-exact.ll +++ test/CodeGen/X86/sdiv-exact.ll @@ -1,7 +1,7 @@ ; RUN: llc -mtriple=i686-- -mattr=+sse2 < %s | FileCheck %s define i32 @test1(i32 %x) { - %div = sdiv exact i32 %x, 25 + %div = sdiv exact nof i32 %x, 25 ret i32 %div ; CHECK-LABEL: test1: ; CHECK: imull $-1030792151, 4(%esp) @@ -9,7 +9,7 @@ } define i32 @test2(i32 %x) { - %div = sdiv exact i32 %x, 24 + %div = sdiv exact nof i32 %x, 24 ret i32 %div ; CHECK-LABEL: test2: ; CHECK: sarl $3 @@ -18,7 +18,7 @@ } define <4 x i32> @test3(<4 x i32> %x) { - %div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 24, i32 24> + %div = sdiv exact nof <4 x i32> %x, <i32 24, i32 24, i32 24, i32 24> ret <4 x i32> %div ; CHECK-LABEL: test3: ; CHECK: psrad $3, Index: test/CodeGen/X86/sdiv-pow2.ll =================================================================== --- test/CodeGen/X86/sdiv-pow2.ll +++ test/CodeGen/X86/sdiv-pow2.ll @@ -3,7 +3,7 @@ ; No attributes, should not use idiv define i32 @test1(i32 inreg %x) { entry: - %div = sdiv i32 %x, 16 + %div = sdiv nof i32 %x, 16 ret i32 %div ; CHECK-LABEL: test1: ; CHECK-NOT: idivl @@ -13,7 +13,7 @@ ; Has minsize (-Oz) attribute, should generate idiv define i32 @test2(i32 inreg %x) minsize { entry: - %div = sdiv i32 %x, 16 + %div = sdiv nof i32 %x, 16 ret i32 %div ; CHECK-LABEL: test2: ; CHECK: idivl @@ -23,7 +23,7 @@ ; Has optsize (-Os) attribute, should not generate idiv define i32 @test3(i32 inreg %x) optsize { entry: - %div = sdiv i32 %x, 16 + %div = sdiv nof i32 %x, 16 ret i32 %div ; CHECK-LABEL: test3: ; CHECK-NOT: idivl Index: test/CodeGen/X86/seh-catchpad.ll =================================================================== --- test/CodeGen/X86/seh-catchpad.ll +++ test/CodeGen/X86/seh-catchpad.ll @@ -36,7 +36,7 @@ ; Function Attrs: nounwind readnone define i32 @do_div(i32 %a, i32 %b) #0 { entry: - %div = sdiv 
i32 %a, %b + %div = sdiv nof i32 %a, %b ret i32 %div } Index: test/CodeGen/X86/seh-safe-div-win32.ll =================================================================== --- test/CodeGen/X86/seh-safe-div-win32.ll +++ test/CodeGen/X86/seh-safe-div-win32.ll @@ -90,7 +90,7 @@ entry: %0 = load i32, i32* %n, align 4 %1 = load i32, i32* %d, align 4 - %div = sdiv i32 %0, %1 + %div = sdiv nof i32 %0, %1 store i32 %div, i32* %r, align 4 ret void } Index: test/CodeGen/X86/seh-safe-div.ll =================================================================== --- test/CodeGen/X86/seh-safe-div.ll +++ test/CodeGen/X86/seh-safe-div.ll @@ -97,7 +97,7 @@ entry: %0 = load i32, i32* %n, align 4 %1 = load i32, i32* %d, align 4 - %div = sdiv i32 %0, %1 + %div = sdiv nof i32 %0, %1 store i32 %div, i32* %r, align 4 ret void } Index: test/CodeGen/X86/twoaddr-coalesce-3.ll =================================================================== --- test/CodeGen/X86/twoaddr-coalesce-3.ll +++ test/CodeGen/X86/twoaddr-coalesce-3.ll @@ -29,7 +29,7 @@ for.body: ; preds = %for.body.lr.ph, %for.body %add5 = phi i32 [ %total.promoted, %for.body.lr.ph ], [ %add, %for.body ] %i.04 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] - %div = sdiv i32 %i.04, 2 + %div = sdiv nof i32 %i.04, 2 %add = add nsw i32 %div, %add5 %inc = add nuw nsw i32 %i.04, 1 %cmp = icmp slt i32 %inc, %0 @@ -67,7 +67,7 @@ for.body: ; preds = %for.body.lr.ph, %for.body %add5 = phi i32 [ %total.promoted, %for.body.lr.ph ], [ %add, %for.body ] %i.04 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] - %div = sdiv i32 %i.04, 2 + %div = sdiv nof i32 %i.04, 2 %add = add nsw i32 %div, %add5 store volatile i32 %add, i32* @g, align 4 %inc = add nuw nsw i32 %i.04, 1 Index: test/CodeGen/X86/twoaddr-coalesce.ll =================================================================== --- test/CodeGen/X86/twoaddr-coalesce.ll +++ test/CodeGen/X86/twoaddr-coalesce.ll @@ -10,7 +10,7 @@ bb1: ; preds = %bb1, %bb1.thread %i.0.reg2mem.0 = phi i32 [ 0, 
%bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=2] %0 = trunc i32 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1] - %1 = sdiv i8 %0, 2 ; <i8> [#uses=1] + %1 = sdiv nof i8 %0, 2 ; <i8> [#uses=1] %2 = sext i8 %1 to i32 ; <i32> [#uses=1] %3 = tail call i32 (i8*, ...) @printf(i8* getelementptr ([4 x i8], [4 x i8]* @"\01LC", i32 0, i32 0), i32 %2) nounwind ; <i32> [#uses=0] %indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2] Index: test/CodeGen/X86/unknown-location.ll =================================================================== --- test/CodeGen/X86/unknown-location.ll +++ test/CodeGen/X86/unknown-location.ll @@ -12,7 +12,7 @@ define i32 @foo(i32 %w, i32 %x, i32 %y, i32 %z) nounwind !dbg !1 { entry: %a = add i32 %w, %x, !dbg !8 - %b = sdiv i32 %a, %y + %b = sdiv nof i32 %a, %y %c = add i32 %b, %z, !dbg !8 ret i32 %c, !dbg !8 } Index: test/CodeGen/X86/unused_stackslots.ll =================================================================== --- test/CodeGen/X86/unused_stackslots.ll +++ test/CodeGen/X86/unused_stackslots.ll @@ -45,7 +45,7 @@ for.body: ; preds = %for.inc73, %entry %q.0131 = phi i32 [ 0, %entry ], [ %inc74, %for.inc73 ] %m.0130 = phi i32 [ 0, %entry ], [ %m.4, %for.inc73 ] - %div = sdiv i32 %q.0131, 2 + %div = sdiv nof i32 %q.0131, 2 %shl = shl i32 %div, 3 %rem = srem i32 %q.0131, 2 %shl1 = shl nsw i32 %rem, 3 Index: test/CodeGen/X86/vec_sdiv_to_shift.ll =================================================================== --- test/CodeGen/X86/vec_sdiv_to_shift.ll +++ test/CodeGen/X86/vec_sdiv_to_shift.ll @@ -22,7 +22,7 @@ ; AVX-NEXT: vpsraw $5, %xmm0, %xmm0 ; AVX-NEXT: retq entry: - %0 = sdiv <8 x i16> %var, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32> + %0 = sdiv nof <8 x i16> %var, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32> ret <8 x i16> %0 } @@ -45,7 +45,7 @@ ; AVX-NEXT: vpsraw $5, %xmm0, %xmm0 ; AVX-NEXT: retq entry: - %0 = sdiv <8 x i16> %var, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 
32> + %0 = sdiv nof <8 x i16> %var, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32> ret <8 x i16> %0 } @@ -68,7 +68,7 @@ ; AVX-NEXT: vpsrad $4, %xmm0, %xmm0 ; AVX-NEXT: retq entry: -%0 = sdiv <4 x i32> %var, <i32 16, i32 16, i32 16, i32 16> +%0 = sdiv nof <4 x i32> %var, <i32 16, i32 16, i32 16, i32 16> ret <4 x i32> %0 } @@ -94,7 +94,7 @@ ; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0 ; AVX-NEXT: retq entry: -%0 = sdiv <4 x i32> %var, <i32 -16, i32 -16, i32 -16, i32 -16> +%0 = sdiv nof <4 x i32> %var, <i32 -16, i32 -16, i32 -16, i32 -16> ret <4 x i32> %0 } @@ -137,7 +137,7 @@ ; AVX2-NEXT: vpsrad $6, %ymm0, %ymm0 ; AVX2-NEXT: retq entry: -%0 = sdiv <8 x i32> %var, <i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64> +%0 = sdiv nof <8 x i32> %var, <i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64, i32 64> ret <8 x i32> %0 } @@ -180,7 +180,7 @@ ; AVX2-NEXT: vpsraw $2, %ymm0, %ymm0 ; AVX2-NEXT: retq entry: - %a0 = sdiv <16 x i16> %var, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4> + %a0 = sdiv nof <16 x i16> %var, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4> ret <16 x i16> %a0 } @@ -194,6 +194,6 @@ ; AVX-LABEL: sdiv_non_splat: ; AVX: # %bb.0: ; AVX-NEXT: retq - %y = sdiv <4 x i32> %x, <i32 2, i32 0, i32 0, i32 0> + %y = sdiv nof <4 x i32> %x, <i32 2, i32 0, i32 0, i32 0> ret <4 x i32> %y } Index: test/CodeGen/X86/vec_udiv_to_shift.ll =================================================================== --- test/CodeGen/X86/vec_udiv_to_shift.ll +++ test/CodeGen/X86/vec_udiv_to_shift.ll @@ -3,13 +3,13 @@ define <8 x i16> @udiv_vec8x16(<8 x i16> %var) { entry: ; CHECK: lshr <8 x i16> %var, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5> -%0 = udiv <8 x i16> %var, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, i16 32> +%0 = udiv nof <8 x i16> %var, <i16 32, i16 32, i16 32, i16 32, i16 32, i16 32, 
i16 32, i16 32> ret <8 x i16> %0 } define <4 x i32> @udiv_vec4x32(<4 x i32> %var) { entry: ; CHECK: lshr <4 x i32> %var, <i32 4, i32 4, i32 4, i32 4> -%0 = udiv <4 x i32> %var, <i32 16, i32 16, i32 16, i32 16> +%0 = udiv nof <4 x i32> %var, <i32 16, i32 16, i32 16, i32 16> ret <4 x i32> %0 } Index: test/CodeGen/X86/vector-idiv-sdiv-128.ll =================================================================== --- test/CodeGen/X86/vector-idiv-sdiv-128.ll +++ test/CodeGen/X86/vector-idiv-sdiv-128.ll @@ -6,7 +6,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=AVX512BW ; -; sdiv by 7 +; sdiv nof by 7 ; define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind { @@ -71,7 +71,7 @@ ; AVX-NEXT: vmovq %rdx, %xmm0 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; AVX-NEXT: retq - %res = sdiv <2 x i64> %a, <i64 7, i64 7> + %res = sdiv nof <2 x i64> %a, <i64 7, i64 7> ret <2 x i64> %res } @@ -148,7 +148,7 @@ ; AVX2-NEXT: vpsrad $2, %xmm0, %xmm0 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq - %res = sdiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7> + %res = sdiv nof <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7> ret <4 x i32> %res } @@ -169,7 +169,7 @@ ; AVX-NEXT: vpsraw $1, %xmm0, %xmm0 ; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq - %res = sdiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %res = sdiv nof <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ret <8 x i16> %res } @@ -283,7 +283,7 @@ ; AVX512BW-NEXT: vpaddb %xmm0, %xmm1, %xmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq - %res = sdiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> + %res = sdiv nof <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> ret <16 x i8> %res } Index: test/CodeGen/X86/vector-idiv-sdiv-256.ll 
=================================================================== --- test/CodeGen/X86/vector-idiv-sdiv-256.ll +++ test/CodeGen/X86/vector-idiv-sdiv-256.ll @@ -4,7 +4,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX2 --check-prefix=AVX512BW ; -; sdiv by 7 +; sdiv nof by 7 ; define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind { @@ -81,7 +81,7 @@ ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-NEXT: retq - %res = sdiv <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7> + %res = sdiv nof <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7> ret <4 x i64> %res } @@ -128,7 +128,7 @@ ; AVX2-NEXT: vpsrad $2, %ymm0, %ymm0 ; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq - %res = sdiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> + %res = sdiv nof <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> ret <8 x i32> %res } @@ -155,7 +155,7 @@ ; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0 ; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq - %res = sdiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %res = sdiv nof <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ret <16 x i16> %res } @@ -242,7 +242,7 @@ ; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 ; AVX512BW-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ; AVX512BW-NEXT: retq - %res = sdiv <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> + %res = sdiv nof <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> ret <32 x i8> %res 
} Index: test/CodeGen/X86/vector-idiv-sdiv-512.ll =================================================================== --- test/CodeGen/X86/vector-idiv-sdiv-512.ll +++ test/CodeGen/X86/vector-idiv-sdiv-512.ll @@ -3,7 +3,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW ; -; sdiv by 7 +; sdiv nof by 7 ; define <8 x i64> @test_div7_8i64(<8 x i64> %a) nounwind { @@ -77,7 +77,7 @@ ; AVX-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 ; AVX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 ; AVX-NEXT: retq - %res = sdiv <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7> + %res = sdiv nof <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7> ret <8 x i64> %res } @@ -96,7 +96,7 @@ ; AVX-NEXT: vpsrad $2, %zmm0, %zmm0 ; AVX-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX-NEXT: retq - %res = sdiv <16 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> + %res = sdiv nof <16 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> ret <16 x i32> %res } @@ -121,7 +121,7 @@ ; AVX512BW-NEXT: vpsraw $1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq - %res = sdiv <32 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %res = sdiv nof <32 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ret <32 x i16> %res } @@ -193,7 +193,7 @@ ; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 ; AVX512BW-NEXT: vpaddb %zmm0, %zmm1, %zmm0 ; 
AVX512BW-NEXT: retq - %res = sdiv <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> + %res = sdiv nof <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> ret <64 x i8> %res } Index: test/CodeGen/X86/vector-idiv-udiv-128.ll =================================================================== --- test/CodeGen/X86/vector-idiv-udiv-128.ll +++ test/CodeGen/X86/vector-idiv-udiv-128.ll @@ -6,7 +6,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX2 --check-prefix=AVX512BW ; -; udiv by 7 +; udiv nof by 7 ; define <2 x i64> @test_div7_2i64(<2 x i64> %a) nounwind { @@ -77,7 +77,7 @@ ; AVX-NEXT: vmovq %rcx, %xmm0 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; AVX-NEXT: retq - %res = udiv <2 x i64> %a, <i64 7, i64 7> + %res = udiv nof <2 x i64> %a, <i64 7, i64 7> ret <2 x i64> %res } @@ -143,7 +143,7 @@ ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: vpsrld $2, %xmm0, %xmm0 ; AVX2-NEXT: retq - %res = udiv <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7> + %res = udiv nof <4 x i32> %a, <i32 7, i32 7, i32 7, i32 7> ret <4 x i32> %res } @@ -166,7 +166,7 @@ ; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $2, %xmm0, %xmm0 ; AVX-NEXT: retq - %res = udiv <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + 
%res = udiv nof <8 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ret <8 x i16> %res } @@ -260,7 +260,7 @@ ; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq - %res = udiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> + %res = udiv nof <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> ret <16 x i8> %res } Index: test/CodeGen/X86/vector-idiv-udiv-256.ll =================================================================== --- test/CodeGen/X86/vector-idiv-udiv-256.ll +++ test/CodeGen/X86/vector-idiv-udiv-256.ll @@ -4,7 +4,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX2 --check-prefix=AVX512BW ; -; udiv by 7 +; udiv nof by 7 ; define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind { @@ -89,7 +89,7 @@ ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 ; AVX2-NEXT: retq - %res = udiv <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7> + %res = udiv nof <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7> ret <4 x i64> %res } @@ -136,7 +136,7 @@ ; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpsrld $2, %ymm0, %ymm0 ; AVX2-NEXT: retq - %res = udiv <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> + %res = udiv nof <8 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> ret <8 x i32> %res } @@ -166,7 +166,7 @@ ; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm0 ; AVX2-NEXT: retq - %res = udiv <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %res = udiv nof <16 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ret <16 x i16> %res } @@ -242,7 +242,7 @@ ; AVX512BW-NEXT: vpsrlw 
$2, %ymm0, %ymm0 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 ; AVX512BW-NEXT: retq - %res = udiv <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> + %res = udiv nof <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> ret <32 x i8> %res } Index: test/CodeGen/X86/vector-idiv-udiv-512.ll =================================================================== --- test/CodeGen/X86/vector-idiv-udiv-512.ll +++ test/CodeGen/X86/vector-idiv-udiv-512.ll @@ -3,7 +3,7 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW ; -; udiv by 7 +; udiv nof by 7 ; define <8 x i64> @test_div7_8i64(<8 x i64> %a) nounwind { @@ -85,7 +85,7 @@ ; AVX-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0 ; AVX-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 ; AVX-NEXT: retq - %res = udiv <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7> + %res = udiv nof <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7> ret <8 x i64> %res } @@ -104,7 +104,7 @@ ; AVX-NEXT: vpaddd %zmm3, %zmm0, %zmm0 ; AVX-NEXT: vpsrld $2, %zmm0, %zmm0 ; AVX-NEXT: retq - %res = udiv <16 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> + %res = udiv nof <16 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7> ret <16 x i32> %res } @@ -132,7 +132,7 @@ ; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm0 ; AVX512BW-NEXT: retq - %res = udiv <32 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, 
i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> + %res = udiv nof <32 x i16> %a, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7> ret <32 x i16> %res } @@ -196,7 +196,7 @@ ; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm0 ; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 ; AVX512BW-NEXT: retq - %res = udiv <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> + %res = udiv nof <64 x i8> %a, <i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,i8 7, i8 7, i8 7, i8 7> ret <64 x i8> %res } Index: test/CodeGen/X86/vector-idiv.ll =================================================================== --- test/CodeGen/X86/vector-idiv.ll +++ test/CodeGen/X86/vector-idiv.ll @@ -84,6 +84,6 @@ ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX2-NEXT: retq entry: - %sdiv = sdiv <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3> + %sdiv = sdiv nof <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3> ret <4 x i32> %sdiv } Index: test/CodeGen/X86/x86-cmov-converter.ll =================================================================== --- test/CodeGen/X86/x86-cmov-converter.ll +++ test/CodeGen/X86/x86-cmov-converter.ll @@ 
-157,7 +157,7 @@ store i32 %., i32* %arrayidx, align 4 %arrayidx7 = getelementptr inbounds i32, i32* %d, i64 %indvars.iv %1 = load i32, i32* %arrayidx7, align 4 - %div = sdiv i32 %1, %b + %div = sdiv nof i32 %1, %b store i32 %div, i32* %arrayidx7, align 4 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 %exitcond = icmp eq i64 %indvars.iv.next, %wide.trip.count @@ -315,7 +315,7 @@ %i = phi i32 [ %i_inc, %while.body ], [ 0, %entry ] %arr_i = getelementptr inbounds i32, i32* %arr, i32 %i %x = load i32, i32* %arr_i, align 4 - %div = udiv i32 %x, %a + %div = udiv nof i32 %x, %a %cond = icmp ugt i32 %div, %a %condOpp = icmp ule i32 %div, %a %s1 = select i1 %cond, i32 11, i32 22 Index: test/CodeGen/X86/x86_64-mul-by-const.ll =================================================================== --- test/CodeGen/X86/x86_64-mul-by-const.ll +++ test/CodeGen/X86/x86_64-mul-by-const.ll @@ -4,6 +4,6 @@ define i32 @f9188_mul365384439_shift27(i32 %A) nounwind { ; CHECK: imulq $365384439, ; CHECK: shrq $59, %rax - %tmp1 = udiv i32 %A, 1577682821 ; <i32> [#uses=1] + %tmp1 = udiv nof i32 %A, 1577682821 ; <i32> [#uses=1] ret i32 %tmp1 } Index: test/ExecutionEngine/MCJIT/test-arith.ll =================================================================== --- test/ExecutionEngine/MCJIT/test-arith.ll +++ test/ExecutionEngine/MCJIT/test-arith.ll @@ -4,30 +4,30 @@ %A = add i8 0, 12 ; <i8> [#uses=1] %B = sub i8 %A, 1 ; <i8> [#uses=2] %C = mul i8 %B, %B ; <i8> [#uses=2] - %D = sdiv i8 %C, %C ; <i8> [#uses=2] + %D = sdiv nof i8 %C, %C ; <i8> [#uses=2] %E = srem i8 %D, %D ; <i8> [#uses=0] - %F = udiv i8 5, 6 ; <i8> [#uses=0] + %F = udiv nof i8 5, 6 ; <i8> [#uses=0] %G = urem i8 6, 5 ; <i8> [#uses=0] %A.upgrd.1 = add i16 0, 12 ; <i16> [#uses=1] %B.upgrd.2 = sub i16 %A.upgrd.1, 1 ; <i16> [#uses=2] %C.upgrd.3 = mul i16 %B.upgrd.2, %B.upgrd.2 ; <i16> [#uses=2] - %D.upgrd.4 = sdiv i16 %C.upgrd.3, %C.upgrd.3 ; <i16> [#uses=2] + %D.upgrd.4 = sdiv nof i16 %C.upgrd.3, %C.upgrd.3 ; <i16> [#uses=2] 
%E.upgrd.5 = srem i16 %D.upgrd.4, %D.upgrd.4 ; <i16> [#uses=0] - %F.upgrd.6 = udiv i16 5, 6 ; <i16> [#uses=0] + %F.upgrd.6 = udiv nof i16 5, 6 ; <i16> [#uses=0] %G.upgrd.7 = urem i32 6, 5 ; <i32> [#uses=0] %A.upgrd.8 = add i32 0, 12 ; <i32> [#uses=1] %B.upgrd.9 = sub i32 %A.upgrd.8, 1 ; <i32> [#uses=2] %C.upgrd.10 = mul i32 %B.upgrd.9, %B.upgrd.9 ; <i32> [#uses=2] - %D.upgrd.11 = sdiv i32 %C.upgrd.10, %C.upgrd.10 ; <i32> [#uses=2] + %D.upgrd.11 = sdiv nof i32 %C.upgrd.10, %C.upgrd.10 ; <i32> [#uses=2] %E.upgrd.12 = srem i32 %D.upgrd.11, %D.upgrd.11 ; <i32> [#uses=0] - %F.upgrd.13 = udiv i32 5, 6 ; <i32> [#uses=0] + %F.upgrd.13 = udiv nof i32 5, 6 ; <i32> [#uses=0] %G1 = urem i32 6, 5 ; <i32> [#uses=0] %A.upgrd.14 = add i64 0, 12 ; <i64> [#uses=1] %B.upgrd.15 = sub i64 %A.upgrd.14, 1 ; <i64> [#uses=2] %C.upgrd.16 = mul i64 %B.upgrd.15, %B.upgrd.15 ; <i64> [#uses=2] - %D.upgrd.17 = sdiv i64 %C.upgrd.16, %C.upgrd.16 ; <i64> [#uses=2] + %D.upgrd.17 = sdiv nof i64 %C.upgrd.16, %C.upgrd.16 ; <i64> [#uses=2] %E.upgrd.18 = srem i64 %D.upgrd.17, %D.upgrd.17 ; <i64> [#uses=0] - %F.upgrd.19 = udiv i64 5, 6 ; <i64> [#uses=0] + %F.upgrd.19 = udiv nof i64 5, 6 ; <i64> [#uses=0] %G.upgrd.20 = urem i64 6, 5 ; <i64> [#uses=0] ret i32 0 } Index: test/ExecutionEngine/OrcMCJIT/test-arith.ll =================================================================== --- test/ExecutionEngine/OrcMCJIT/test-arith.ll +++ test/ExecutionEngine/OrcMCJIT/test-arith.ll @@ -4,30 +4,30 @@ %A = add i8 0, 12 ; <i8> [#uses=1] %B = sub i8 %A, 1 ; <i8> [#uses=2] %C = mul i8 %B, %B ; <i8> [#uses=2] - %D = sdiv i8 %C, %C ; <i8> [#uses=2] + %D = sdiv nof i8 %C, %C ; <i8> [#uses=2] %E = srem i8 %D, %D ; <i8> [#uses=0] - %F = udiv i8 5, 6 ; <i8> [#uses=0] + %F = udiv nof i8 5, 6 ; <i8> [#uses=0] %G = urem i8 6, 5 ; <i8> [#uses=0] %A.upgrd.1 = add i16 0, 12 ; <i16> [#uses=1] %B.upgrd.2 = sub i16 %A.upgrd.1, 1 ; <i16> [#uses=2] %C.upgrd.3 = mul i16 %B.upgrd.2, %B.upgrd.2 ; <i16> [#uses=2] - %D.upgrd.4 = sdiv i16 
%C.upgrd.3, %C.upgrd.3 ; <i16> [#uses=2] + %D.upgrd.4 = sdiv nof i16 %C.upgrd.3, %C.upgrd.3 ; <i16> [#uses=2] %E.upgrd.5 = srem i16 %D.upgrd.4, %D.upgrd.4 ; <i16> [#uses=0] - %F.upgrd.6 = udiv i16 5, 6 ; <i16> [#uses=0] + %F.upgrd.6 = udiv nof i16 5, 6 ; <i16> [#uses=0] %G.upgrd.7 = urem i32 6, 5 ; <i32> [#uses=0] %A.upgrd.8 = add i32 0, 12 ; <i32> [#uses=1] %B.upgrd.9 = sub i32 %A.upgrd.8, 1 ; <i32> [#uses=2] %C.upgrd.10 = mul i32 %B.upgrd.9, %B.upgrd.9 ; <i32> [#uses=2] - %D.upgrd.11 = sdiv i32 %C.upgrd.10, %C.upgrd.10 ; <i32> [#uses=2] + %D.upgrd.11 = sdiv nof i32 %C.upgrd.10, %C.upgrd.10 ; <i32> [#uses=2] %E.upgrd.12 = srem i32 %D.upgrd.11, %D.upgrd.11 ; <i32> [#uses=0] - %F.upgrd.13 = udiv i32 5, 6 ; <i32> [#uses=0] + %F.upgrd.13 = udiv nof i32 5, 6 ; <i32> [#uses=0] %G1 = urem i32 6, 5 ; <i32> [#uses=0] %A.upgrd.14 = add i64 0, 12 ; <i64> [#uses=1] %B.upgrd.15 = sub i64 %A.upgrd.14, 1 ; <i64> [#uses=2] %C.upgrd.16 = mul i64 %B.upgrd.15, %B.upgrd.15 ; <i64> [#uses=2] - %D.upgrd.17 = sdiv i64 %C.upgrd.16, %C.upgrd.16 ; <i64> [#uses=2] + %D.upgrd.17 = sdiv nof i64 %C.upgrd.16, %C.upgrd.16 ; <i64> [#uses=2] %E.upgrd.18 = srem i64 %D.upgrd.17, %D.upgrd.17 ; <i64> [#uses=0] - %F.upgrd.19 = udiv i64 5, 6 ; <i64> [#uses=0] + %F.upgrd.19 = udiv nof i64 5, 6 ; <i64> [#uses=0] %G.upgrd.20 = urem i64 6, 5 ; <i64> [#uses=0] ret i32 0 } Index: test/ExecutionEngine/test-interp-vec-arithm_int.ll =================================================================== --- test/ExecutionEngine/test-interp-vec-arithm_int.ll +++ test/ExecutionEngine/test-interp-vec-arithm_int.ll @@ -4,33 +4,33 @@ %A_i8 = add <5 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4>, <i8 12, i8 34, i8 56, i8 78, i8 89> %B_i8 = sub <5 x i8> %A_i8, <i8 11, i8 22, i8 33, i8 44, i8 55> %C_i8 = mul <5 x i8> %B_i8, %B_i8 - %D_i8 = sdiv <5 x i8> %C_i8, %C_i8 + %D_i8 = sdiv nof <5 x i8> %C_i8, %C_i8 %E_i8 = srem <5 x i8> %D_i8, %D_i8 - %F_i8 = udiv <5 x i8> <i8 5, i8 6, i8 7, i8 8, i8 9>, <i8 6, i8 5, i8 4, i8 3, i8 2> + %F_i8 
= udiv nof <5 x i8> <i8 5, i8 6, i8 7, i8 8, i8 9>, <i8 6, i8 5, i8 4, i8 3, i8 2> %G_i8 = urem <5 x i8> <i8 6, i8 7, i8 8, i8 9, i8 10>, <i8 5, i8 4, i8 2, i8 2, i8 1> %A_i16 = add <4 x i16> <i16 0, i16 1, i16 2, i16 3>, <i16 123, i16 345, i16 567, i16 789> %B_i16 = sub <4 x i16> %A_i16, <i16 111, i16 222, i16 333, i16 444> %C_i16 = mul <4 x i16> %B_i16, %B_i16 - %D_i16 = sdiv <4 x i16> %C_i16, %C_i16 + %D_i16 = sdiv nof <4 x i16> %C_i16, %C_i16 %E_i16 = srem <4 x i16> %D_i16, %D_i16 - %F_i16 = udiv <4 x i16> <i16 5, i16 6, i16 7, i16 8>, <i16 6, i16 5, i16 4, i16 3> + %F_i16 = udiv nof <4 x i16> <i16 5, i16 6, i16 7, i16 8>, <i16 6, i16 5, i16 4, i16 3> %G_i16 = urem <4 x i16> <i16 6, i16 7, i16 8, i16 9>, <i16 5, i16 4, i16 3, i16 2> %A_i32 = add <3 x i32> <i32 0, i32 1, i32 2>, <i32 1234, i32 3456, i32 5678> %B_i32 = sub <3 x i32> %A_i32, <i32 1111, i32 2222, i32 3333> %C_i32 = mul <3 x i32> %B_i32, %B_i32 - %D_i32 = sdiv <3 x i32> %C_i32, %C_i32 + %D_i32 = sdiv nof <3 x i32> %C_i32, %C_i32 %E_i32 = srem <3 x i32> %D_i32, %D_i32 - %F_i32 = udiv <3 x i32> <i32 5, i32 6, i32 7>, <i32 6, i32 5, i32 4> + %F_i32 = udiv nof <3 x i32> <i32 5, i32 6, i32 7>, <i32 6, i32 5, i32 4> %G_i32 = urem <3 x i32> <i32 6, i32 7, i32 8>, <i32 5, i32 4, i32 3> %A_i64 = add <2 x i64> <i64 0, i64 1>, <i64 12455, i64 34567> %B_i64 = sub <2 x i64> %A_i64, <i64 11111, i64 22222> %C_i64 = mul <2 x i64> %B_i64, %B_i64 - %D_i64 = sdiv <2 x i64> %C_i64, %C_i64 + %D_i64 = sdiv nof <2 x i64> %C_i64, %C_i64 %E_i64 = srem <2 x i64> %D_i64, %D_i64 - %F_i64 = udiv <2 x i64> <i64 5, i64 6>, <i64 6, i64 5> + %F_i64 = udiv nof <2 x i64> <i64 5, i64 6>, <i64 6, i64 5> %G_i64 = urem <2 x i64> <i64 6, i64 7>, <i64 5, i64 3> ret i32 0 Index: test/Feature/seh-nounwind.ll =================================================================== --- test/Feature/seh-nounwind.ll +++ test/Feature/seh-nounwind.ll @@ -7,7 +7,7 @@ define i32 @div(i32 %n, i32 %d) nounwind noinline { entry: - %div = sdiv i32 %n, %d + 
%div = sdiv nof i32 %n, %d ret i32 %div } Index: test/Instrumentation/DataFlowSanitizer/arith.ll =================================================================== --- test/Instrumentation/DataFlowSanitizer/arith.ll +++ test/Instrumentation/DataFlowSanitizer/arith.ll @@ -44,10 +44,10 @@ ; CHECK: load{{.*}}__dfsan_arg_tls ; CHECK: load{{.*}}__dfsan_arg_tls ; CHECK: call{{.*}}__dfsan_union - ; CHECK: sdiv i8 + ; CHECK: sdiv nof i8 ; CHECK: store{{.*}}__dfsan_retval_tls ; CHECK: ret i8 - %c = sdiv i8 %a, %b + %c = sdiv nof i8 %a, %b ret i8 %c } @@ -56,9 +56,9 @@ ; CHECK: load{{.*}}__dfsan_arg_tls ; CHECK: load{{.*}}__dfsan_arg_tls ; CHECK: call{{.*}}__dfsan_union - ; CHECK: udiv i8 + ; CHECK: udiv nof i8 ; CHECK: store{{.*}}__dfsan_retval_tls ; CHECK: ret i8 - %c = udiv i8 %a, %b + %c = udiv nof i8 %a, %b ret i8 %c } Index: test/Instrumentation/MemorySanitizer/msan_basic.ll =================================================================== --- test/Instrumentation/MemorySanitizer/msan_basic.ll +++ test/Instrumentation/MemorySanitizer/msan_basic.ll @@ -405,7 +405,7 @@ define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory { entry: - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b ret i32 %div } Index: test/Instrumentation/SanitizerCoverage/div-tracing.ll =================================================================== --- test/Instrumentation/SanitizerCoverage/div-tracing.ll +++ test/Instrumentation/SanitizerCoverage/div-tracing.ll @@ -6,7 +6,7 @@ define i32 @div_a_b(i32 %a, i32 %b) local_unnamed_addr { entry: - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b ret i32 %div } @@ -17,7 +17,7 @@ define i32 @div_a_10(i32 %a) local_unnamed_addr { entry: - %div = sdiv i32 %a, 10 + %div = sdiv nof i32 %a, 10 ret i32 %div } @@ -27,7 +27,7 @@ define i64 @div_a_b_64(i64 %a, i64 %b) local_unnamed_addr { entry: - %div = udiv i64 %a, %b + %div = udiv nof i64 %a, %b ret i64 %div } Index: test/JitListener/multiple.ll 
=================================================================== --- test/JitListener/multiple.ll +++ test/JitListener/multiple.ll @@ -79,7 +79,7 @@ if.end: ; preds = %entry %1 = load i32, i32* %a.addr, align 4, !dbg !27 - %div = sdiv i32 100, %1, !dbg !28 + %div = sdiv nof i32 100, %1, !dbg !28 store i32 %div, i32* %retval, !dbg !29 br label %return, !dbg !29 Index: test/Other/lint.ll =================================================================== --- test/Other/lint.ll +++ test/Other/lint.ll @@ -34,9 +34,9 @@ %gep = getelementptr {i8, i8}, {i8, i8}* %buf2, i32 0, i32 1 store i8 0, i8* %gep, align 2 ; CHECK: Division by zero - %sd = sdiv i32 2, 0 + %sd = sdiv nof i32 2, 0 ; CHECK: Division by zero - %ud = udiv i32 2, 0 + %ud = udiv nof i32 2, 0 ; CHECK: Division by zero %sr = srem i32 2, 0 ; CHECK: Division by zero Index: test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-constant-numerator.ll =================================================================== --- test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-constant-numerator.ll +++ test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-constant-numerator.ll @@ -12,8 +12,8 @@ ; CHECK: icmp eq i64 [[AND]], 0 ; CHECK: [[TRUNC:%[0-9]+]] = trunc i64 %a to i32 - ; CHECK: udiv i32 -1, [[TRUNC]] - %d = sdiv i64 4294967295, %a ; 0xffff'ffff + ; CHECK: udiv nof i32 -1, [[TRUNC]] + %d = sdiv nof i64 4294967295, %a ; 0xffff'ffff ret i64 %d } @@ -21,15 +21,15 @@ ; into the bypass width, leave it as a plain 64-bit div with no bypass. ; CHECK-LABEL: @large_constant_numer define i64 @large_constant_numer(i64 %a) { - ; CHECK-NOT: udiv i32 - %d = sdiv i64 4294967296, %a ; 0x1'0000'0000 + ; CHECK-NOT: udiv nof i32 + %d = sdiv nof i64 4294967296, %a ; 0x1'0000'0000 ret i64 %d } ; For good measure, try a value larger than 2^32. 
; CHECK-LABEL: @larger_constant_numer define i64 @larger_constant_numer(i64 %a) { - ; CHECK-NOT: udiv i32 - %d = sdiv i64 5000000000, %a + ; CHECK-NOT: udiv nof i32 + %d = sdiv nof i64 5000000000, %a ret i64 %d } Index: test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-not-exact.ll =================================================================== --- test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-not-exact.ll +++ test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-not-exact.ll @@ -9,8 +9,8 @@ ; ; CHECK-LABEL: @test define void @test(i64 %a, i64 %b, i64* %retptr) { - ; CHECK: udiv i32 - %d = sdiv i64 %a, %b + ; CHECK: udiv nof i32 + %d = sdiv nof i64 %a, %b store i64 %d, i64* %retptr ret void } Index: test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-special-cases.ll =================================================================== --- test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-special-cases.ll +++ test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div-special-cases.ll @@ -31,17 +31,17 @@ ; CHECK-NEXT: br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP8:%.*]] ; CHECK: [[TMP4:%.*]] = trunc i64 [[B_1]] to i32 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[A]] to i32 -; CHECK-NEXT: [[TMP6:%.*]] = udiv i32 [[TMP5]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = udiv nof i32 [[TMP5]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64 ; CHECK-NEXT: br label [[TMP10:%.*]] -; CHECK: [[TMP9:%.*]] = sdiv i64 [[A]], [[B_1]] +; CHECK: [[TMP9:%.*]] = sdiv nof i64 [[A]], [[B_1]] ; CHECK-NEXT: br label [[TMP10]] ; CHECK: [[TMP11:%.*]] = phi i64 [ [[TMP7]], [[TMP3]] ], [ [[TMP9]], [[TMP8]] ] ; CHECK-NEXT: store i64 [[TMP11]], i64* [[RETPTR:%.*]] ; CHECK-NEXT: ret void ; %b.1 = zext i32 %b to i64 - %res = sdiv i64 %a, %b.1 + %res = sdiv nof i64 %a, %b.1 store i64 %res, i64* %retptr ret void } @@ -54,14 +54,14 @@ ; CHECK-NEXT: [[B_1:%.*]] = zext i32 [[B:%.*]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A_1]] to i32 ; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 
[[B_1]] to i32 -; CHECK-NEXT: [[TMP3:%.*]] = udiv i32 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[TMP3:%.*]] = udiv nof i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 ; CHECK-NEXT: store i64 [[TMP4]], i64* [[RETPTR:%.*]] ; CHECK-NEXT: ret void ; %a.1 = and i64 %a, 4294967295 %b.1 = zext i32 %b to i64 - %res = udiv i64 %a.1, %b.1 + %res = udiv nof i64 %a.1, %b.1 store i64 %res, i64* %retptr ret void } @@ -75,7 +75,7 @@ ; CHECK-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP9:%.*]] ; CHECK: [[TMP3:%.*]] = trunc i64 [[B]] to i32 ; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[A_1]] to i32 -; CHECK-NEXT: [[TMP5:%.*]] = udiv i32 [[TMP4]], [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = udiv nof i32 [[TMP4]], [[TMP3]] ; CHECK-NEXT: [[TMP6:%.*]] = urem i32 [[TMP4]], [[TMP3]] ; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP5]] to i64 ; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[TMP6]] to i64 @@ -87,7 +87,7 @@ ; CHECK-NEXT: ret void ; %a.1 = zext i32 %a to i64 - %div = udiv i64 %a.1, %b + %div = udiv nof i64 %a.1, %b %rem = urem i64 %a.1, %b %res = add i64 %div, %rem store i64 %res, i64* %retptr @@ -99,12 +99,12 @@ define void @Test_dont_bypass_xor(i64 %a, i64 %b, i64 %l, i64* %retptr) { ; CHECK-LABEL: @Test_dont_bypass_xor( ; CHECK-NEXT: [[C:%.*]] = xor i64 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[RES:%.*]] = udiv i64 [[C]], [[L:%.*]] +; CHECK-NEXT: [[RES:%.*]] = udiv nof i64 [[C]], [[L:%.*]] ; CHECK-NEXT: store i64 [[RES]], i64* [[RETPTR:%.*]] ; CHECK-NEXT: ret void ; %c = xor i64 %a, %b - %res = udiv i64 %c, %l + %res = udiv nof i64 %c, %l store i64 %res, i64* %retptr ret void } @@ -119,7 +119,7 @@ ; CHECK-NEXT: br label [[MERGE]] ; CHECK: merge: ; CHECK-NEXT: [[E:%.*]] = phi i64 [ undef, [[ENTRY:%.*]] ], [ [[C]], [[XORPATH]] ] -; CHECK-NEXT: [[RES:%.*]] = sdiv i64 [[E]], [[L:%.*]] +; CHECK-NEXT: [[RES:%.*]] = sdiv nof i64 [[E]], [[L:%.*]] ; CHECK-NEXT: store i64 [[RES]], i64* [[RETPTR:%.*]] ; CHECK-NEXT: ret void ; @@ -133,7 +133,7 @@ merge: %e = phi i64 [ undef, %entry 
], [ %c, %xorpath ] - %res = sdiv i64 %e, %l + %res = sdiv nof i64 %e, %l store i64 %res, i64* %retptr ret void } @@ -167,10 +167,10 @@ ; CHECK-NEXT: br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP8:%.*]] ; CHECK: [[TMP4:%.*]] = trunc i64 [[B]] to i32 ; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[LHS]] to i32 -; CHECK-NEXT: [[TMP6:%.*]] = udiv i32 [[TMP5]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = udiv nof i32 [[TMP5]], [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64 ; CHECK-NEXT: br label [[TMP10:%.*]] -; CHECK: [[TMP9:%.*]] = sdiv i64 [[LHS]], [[B]] +; CHECK: [[TMP9:%.*]] = sdiv nof i64 [[LHS]], [[B]] ; CHECK-NEXT: br label [[TMP10]] ; CHECK: [[TMP11:%.*]] = phi i64 [ [[TMP7]], [[TMP3]] ], [ [[TMP9]], [[TMP8]] ] ; CHECK-NEXT: store i64 [[TMP11]], i64* [[RETPTR:%.*]] @@ -186,7 +186,7 @@ merge: %lhs = phi i64 [ 42, %branch ], [ %a.mul, %entry ] - %res = sdiv i64 %lhs, %b + %res = sdiv nof i64 %lhs, %b store i64 %res, i64* %retptr ret void } Index: test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll =================================================================== --- test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll +++ test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll @@ -6,11 +6,11 @@ ; We only use the div instruction -- the rem should be DCE'ed. 
; CHECK-LABEL: @div_only define void @div_only(i64 %a, i64 %b, i64* %retptr) { - ; CHECK: udiv i32 + ; CHECK: udiv nof i32 ; CHECK-NOT: urem - ; CHECK: sdiv i64 + ; CHECK: sdiv nof i64 ; CHECK-NOT: rem - %d = sdiv i64 %a, %b + %d = sdiv nof i64 %a, %b store i64 %d, i64* %retptr ret void } @@ -32,12 +32,12 @@ define i64 @udiv_by_constant(i32 %a) { ; CHECK-NEXT: [[A_ZEXT:%.*]] = zext i32 [[A:%.*]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A_ZEXT]] to i32 -; CHECK-NEXT: [[TMP2:%.*]] = udiv i32 [[TMP1]], 50 +; CHECK-NEXT: [[TMP2:%.*]] = udiv nof i32 [[TMP1]], 50 ; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 ; CHECK-NEXT: ret i64 [[TMP3]] %a.zext = zext i32 %a to i64 - %wide.div = udiv i64 %a.zext, 50 + %wide.div = udiv nof i64 %a.zext, 50 ret i64 %wide.div } @@ -60,10 +60,10 @@ ; ; CHECK-LABEL: @udiv_by_constant_negative_0( define i64 @udiv_by_constant_negative_0(i64 %a) { -; CHECK-NEXT: [[WIDE_DIV:%.*]] = udiv i64 [[A:%.*]], 50 +; CHECK-NEXT: [[WIDE_DIV:%.*]] = udiv nof i64 [[A:%.*]], 50 ; CHECK-NEXT: ret i64 [[WIDE_DIV]] - %wide.div = udiv i64 %a, 50 + %wide.div = udiv nof i64 %a, 50 ret i64 %wide.div } @@ -73,11 +73,11 @@ ; CHECK-LABEL: @udiv_by_constant_negative_1( define i64 @udiv_by_constant_negative_1(i32 %a) { ; CHECK-NEXT: [[A_ZEXT:%.*]] = zext i32 [[A:%.*]] to i64 -; CHECK-NEXT: [[WIDE_DIV:%.*]] = udiv i64 [[A_ZEXT]], 8589934592 +; CHECK-NEXT: [[WIDE_DIV:%.*]] = udiv nof i64 [[A_ZEXT]], 8589934592 ; CHECK-NEXT: ret i64 [[WIDE_DIV]] %a.zext = zext i32 %a to i64 - %wide.div = udiv i64 %a.zext, 8589934592 ;; == 1 << 33 + %wide.div = udiv nof i64 %a.zext, 8589934592 ;; == 1 << 33 ret i64 %wide.div } Index: test/Transforms/CodeGenPrepare/X86/select.ll =================================================================== --- test/Transforms/CodeGenPrepare/X86/select.ll +++ test/Transforms/CodeGenPrepare/X86/select.ll @@ -143,8 +143,8 @@ } define i32 @sdiv_no_sink(i32 %a, i32 %b) { - %div1 = sdiv i32 %a, %b - %div2 = sdiv i32 %b, %a + %div1 = sdiv nof 
i32 %a, %b + %div2 = sdiv nof i32 %b, %a %cmp = icmp sgt i32 %a, 5 %sel = select i1 %cmp, i32 %div1, i32 %div2 ret i32 %sel Index: test/Transforms/ConstProp/2002-05-03-DivideByZeroException.ll =================================================================== --- test/Transforms/ConstProp/2002-05-03-DivideByZeroException.ll +++ test/Transforms/ConstProp/2002-05-03-DivideByZeroException.ll @@ -4,7 +4,7 @@ ; define i32 @test() { - %R = sdiv i32 12, 0 ; <i32> [#uses=1] + %R = sdiv nof i32 12, 0 ; <i32> [#uses=1] ret i32 %R } Index: test/Transforms/ConstProp/2003-05-12-DivideError.ll =================================================================== --- test/Transforms/ConstProp/2003-05-12-DivideError.ll +++ test/Transforms/ConstProp/2003-05-12-DivideError.ll @@ -4,7 +4,7 @@ ; define i32 @test() { - %R = sdiv i32 -2147483648, -1 ; <i32> [#uses=1] + %R = sdiv nof i32 -2147483648, -1 ; <i32> [#uses=1] ret i32 %R } Index: test/Transforms/ConstProp/2007-02-23-sdiv.ll =================================================================== --- test/Transforms/ConstProp/2007-02-23-sdiv.ll +++ test/Transforms/ConstProp/2007-02-23-sdiv.ll @@ -1,5 +1,5 @@ ; RUN: llvm-as < %s | llvm-dis | grep "global i32 0" ; PR1215 -@G = global i32 sdiv (i32 0, i32 -1) +@G = global i32 sdiv nof (i32 0, i32 -1) Index: test/Transforms/ConstProp/2009-06-20-constexpr-zero-lhs.ll =================================================================== --- test/Transforms/ConstProp/2009-06-20-constexpr-zero-lhs.ll +++ test/Transforms/ConstProp/2009-06-20-constexpr-zero-lhs.ll @@ -1,8 +1,8 @@ ; RUN: llvm-as < %s | llvm-dis | not grep ptrtoint ; PR4424 @G = external global i32 -@test1 = constant i32 sdiv (i32 0, i32 ptrtoint (i32* @G to i32)) -@test2 = constant i32 udiv (i32 0, i32 ptrtoint (i32* @G to i32)) +@test1 = constant i32 sdiv nof (i32 0, i32 ptrtoint (i32* @G to i32)) +@test2 = constant i32 udiv nof (i32 0, i32 ptrtoint (i32* @G to i32)) @test3 = constant i32 srem (i32 0, i32 ptrtoint (i32* @G to 
i32)) @test4 = constant i32 urem (i32 0, i32 ptrtoint (i32* @G to i32)) @test5 = constant i32 lshr (i32 0, i32 ptrtoint (i32* @G to i32)) Index: test/Transforms/ConstProp/constant-expr.ll =================================================================== --- test/Transforms/ConstProp/constant-expr.ll +++ test/Transforms/ConstProp/constant-expr.ll @@ -11,9 +11,9 @@ @C = global i1 mul (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z)) ; CHECK: @C = global i1 and (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z)) -@D = global i1 sdiv (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z)) +@D = global i1 sdiv nof (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z)) ; CHECK: @D = global i1 icmp ult (i8* @X, i8* @Y) -@E = global i1 udiv (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z)) +@E = global i1 udiv nof (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z)) ; CHECK: @E = global i1 icmp ult (i8* @X, i8* @Y) @F = global i1 srem (i1 icmp ult (i8* @X, i8* @Y), i1 icmp ult (i8* @X, i8* @Z)) ; CHECK: @F = global i1 false Index: test/Transforms/ConstProp/div-zero.ll =================================================================== --- test/Transforms/ConstProp/div-zero.ll +++ test/Transforms/ConstProp/div-zero.ll @@ -5,7 +5,7 @@ define i32 @foo(i32 %ptr) { entry: %zero = sub i32 %ptr, %ptr ; <i32> [#uses=1] - %div_zero = sdiv i32 %zero, ptrtoint (i32* getelementptr (i32, i32* null, + %div_zero = sdiv nof i32 %zero, ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i32) ; <i32> [#uses=1] ret i32 %div_zero } Index: test/Transforms/ConstantHoisting/ARM/bad-cases.ll =================================================================== --- test/Transforms/ConstantHoisting/ARM/bad-cases.ll +++ test/Transforms/ConstantHoisting/ARM/bad-cases.ll @@ -50,7 +50,7 @@ ; them to a mul in the backend is larget than constant materialization savings. 
define void @signed_const_division(i32 %in1, i32 %in2, i32* %addr) { ; CHECK-LABEL: @signed_const_division -; CHECK: %res1 = sdiv i32 %l1, 1000000000 +; CHECK: %res1 = sdiv nof i32 %l1, 1000000000 ; CHECK: %res2 = srem i32 %l2, 1000000000 entry: br label %loop @@ -58,7 +58,7 @@ loop: %l1 = phi i32 [%res1, %loop], [%in1, %entry] %l2 = phi i32 [%res2, %loop], [%in2, %entry] - %res1 = sdiv i32 %l1, 1000000000 + %res1 = sdiv nof i32 %l1, 1000000000 store volatile i32 %res1, i32* %addr %res2 = srem i32 %l2, 1000000000 store volatile i32 %res2, i32* %addr @@ -71,7 +71,7 @@ define void @unsigned_const_division(i32 %in1, i32 %in2, i32* %addr) { ; CHECK-LABEL: @unsigned_const_division -; CHECK: %res1 = udiv i32 %l1, 1000000000 +; CHECK: %res1 = udiv nof i32 %l1, 1000000000 ; CHECK: %res2 = urem i32 %l2, 1000000000 entry: @@ -80,7 +80,7 @@ loop: %l1 = phi i32 [%res1, %loop], [%in1, %entry] %l2 = phi i32 [%res2, %loop], [%in2, %entry] - %res1 = udiv i32 %l1, 1000000000 + %res1 = udiv nof i32 %l1, 1000000000 store volatile i32 %res1, i32* %addr %res2 = urem i32 %l2, 1000000000 store volatile i32 %res2, i32* %addr Index: test/Transforms/CorrelatedValuePropagation/sdiv.ll =================================================================== --- test/Transforms/CorrelatedValuePropagation/sdiv.ll +++ test/Transforms/CorrelatedValuePropagation/sdiv.ll @@ -11,8 +11,8 @@ br i1 %cmp, label %for.body, label %for.end for.body: ; preds = %for.cond -; CHECK: %div1 = udiv i32 %j.0, 2 - %div = sdiv i32 %j.0, 2 +; CHECK: %div1 = udiv nof i32 %j.0, 2 + %div = sdiv nof i32 %j.0, 2 br label %for.cond for.end: ; preds = %for.cond @@ -30,8 +30,8 @@ br i1 %cmp, label %for.body, label %for.end for.body: ; preds = %for.cond -; CHECK: %div = sdiv i32 %j.0, 2 - %div = sdiv i32 %j.0, 2 +; CHECK: %div = sdiv nof i32 %j.0, 2 + %div = sdiv nof i32 %j.0, 2 br label %for.cond for.end: ; preds = %for.cond @@ -45,8 +45,8 @@ br i1 %cmp, label %bb, label %exit bb: -; CHECK: %div1 = udiv i32 %n, 2 - %div = sdiv 
i32 %n, 2 +; CHECK: %div1 = udiv nof i32 %n, 2 + %div = sdiv nof i32 %n, 2 br label %exit exit: @@ -64,11 +64,11 @@ br i1 %cmp, label %loop, label %exit loop: -; CHECK: udiv i32 %a, 6 +; CHECK: udiv nof i32 %a, 6 %a = phi i32 [ %n, %entry ], [ %div, %loop ] %cond = icmp sgt i32 %a, 4 call void(i1,...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ] - %div = sdiv i32 %a, 6 + %div = sdiv nof i32 %a, 6 br i1 %cond, label %loop, label %exit exit: @@ -84,11 +84,11 @@ br i1 %cmp, label %loop, label %exit loop: -; CHECK: udiv i32 %a, 6 +; CHECK: udiv nof i32 %a, 6 %a = phi i32 [ %n, %entry ], [ %div, %loop ] %cond = icmp sgt i32 %a, 4 call void @llvm.assume(i1 %cond) - %div = sdiv i32 %a, 6 + %div = sdiv nof i32 %a, 6 %loopcond = icmp sgt i32 %div, 8 br i1 %loopcond, label %loop, label %exit Index: test/Transforms/DivRemPairs/PowerPC/div-rem-pairs.ll =================================================================== --- test/Transforms/DivRemPairs/PowerPC/div-rem-pairs.ll +++ test/Transforms/DivRemPairs/PowerPC/div-rem-pairs.ll @@ -4,39 +4,39 @@ define void @decompose_illegal_srem_same_block(i32 %a, i32 %b) { ; CHECK-LABEL: @decompose_illegal_srem_same_block( -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 %a, %b ; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], %b ; CHECK-NEXT: [[TMP2:%.*]] = sub i32 %a, [[TMP1]] ; CHECK-NEXT: call void @foo(i32 [[TMP2]], i32 [[DIV]]) ; CHECK-NEXT: ret void ; %rem = srem i32 %a, %b - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b call void @foo(i32 %rem, i32 %div) ret void } define void @decompose_illegal_urem_same_block(i32 %a, i32 %b) { ; CHECK-LABEL: @decompose_illegal_urem_same_block( -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i32 %a, %b ; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], %b ; CHECK-NEXT: [[TMP2:%.*]] = sub i32 %a, [[TMP1]] ; CHECK-NEXT: call void @foo(i32 [[TMP2]], i32 [[DIV]]) ; CHECK-NEXT: ret void ; - %div = udiv i32 %a, %b + %div = udiv 
nof i32 %a, %b %rem = urem i32 %a, %b call void @foo(i32 %rem, i32 %div) ret void } -; Hoist and optionally decompose the sdiv because it's safe and free. +; Hoist and optionally decompose the sdiv nof because it's safe and free. ; PR31028 - https://bugs.llvm.org/show_bug.cgi?id=31028 define i32 @hoist_sdiv(i32 %a, i32 %b) { ; CHECK-LABEL: @hoist_sdiv( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 %a, %b ; CHECK-NEXT: [[TMP0:%.*]] = mul i32 [[DIV]], %b ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 %a, [[TMP0]] ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 42 @@ -53,7 +53,7 @@ br i1 %cmp, label %if, label %end if: - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b br label %end end: @@ -61,12 +61,12 @@ ret i32 %ret } -; Hoist and optionally decompose the udiv because it's safe and free. +; Hoist and optionally decompose the udiv nof because it's safe and free. define i64 @hoist_udiv(i64 %a, i64 %b) { ; CHECK-LABEL: @hoist_udiv( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[DIV:%.*]] = udiv i64 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i64 %a, %b ; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[DIV]], %b ; CHECK-NEXT: [[TMP1:%.*]] = sub i64 %a, [[TMP0]] ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1]], 42 @@ -83,7 +83,7 @@ br i1 %cmp, label %if, label %end if: - %div = udiv i64 %a, %b + %div = udiv nof i64 %a, %b br label %end end: @@ -96,7 +96,7 @@ define i16 @hoist_srem(i16 %a, i16 %b) { ; CHECK-LABEL: @hoist_srem( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[DIV:%.*]] = sdiv i16 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i16 %a, %b ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[DIV]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: @@ -108,7 +108,7 @@ ; CHECK-NEXT: ret i16 [[RET]] ; entry: - %div = sdiv i16 %a, %b + %div = sdiv nof i16 %a, %b %cmp = icmp eq i16 %div, 42 br i1 %cmp, label %if, label %end @@ -126,7 +126,7 @@ define i8 @hoist_urem(i8 %a, i8 %b) { ; CHECK-LABEL: @hoist_urem( ; CHECK-NEXT: entry: -; 
CHECK-NEXT: [[DIV:%.*]] = udiv i8 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i8 %a, %b ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[DIV]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: @@ -138,7 +138,7 @@ ; CHECK-NEXT: ret i8 [[RET]] ; entry: - %div = udiv i8 %a, %b + %div = udiv nof i8 %a, %b %cmp = icmp eq i8 %div, 42 br i1 %cmp, label %if, label %end @@ -160,7 +160,7 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i32 %a, %b ; CHECK-NEXT: br label %end ; CHECK: end: ; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ] @@ -172,7 +172,7 @@ br i1 %cmp, label %if, label %end if: - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b br label %end end: @@ -218,7 +218,7 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, %c +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 %a, %c ; CHECK-NEXT: br label %end ; CHECK: end: ; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ] @@ -230,7 +230,7 @@ br i1 %cmp, label %if, label %end if: - %div = sdiv i32 %a, %c + %div = sdiv nof i32 %a, %c br label %end end: @@ -243,7 +243,7 @@ define i128 @dont_hoist_urem(i128 %a, i128 %b) { ; CHECK-LABEL: @dont_hoist_urem( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[DIV:%.*]] = udiv i128 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i128 %a, %b ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i128 [[DIV]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: @@ -255,7 +255,7 @@ ; CHECK-NEXT: ret i128 [[RET]] ; entry: - %div = udiv i128 %a, %b + %div = udiv nof i128 %a, %b %cmp = icmp eq i128 %div, 42 br i1 %cmp, label %if, label %end @@ -276,7 +276,7 @@ ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 %cmp, label %if, label %else ; CHECK: if: -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b +; CHECK-NEXT: 
[[DIV:%.*]] = sdiv nof i32 %a, %b ; CHECK-NEXT: br label %end ; CHECK: else: ; CHECK-NEXT: [[REM:%.*]] = srem i32 %a, %b @@ -289,7 +289,7 @@ br i1 %cmp, label %if, label %else if: - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b br label %end else: Index: test/Transforms/DivRemPairs/X86/div-rem-pairs.ll =================================================================== --- test/Transforms/DivRemPairs/X86/div-rem-pairs.ll +++ test/Transforms/DivRemPairs/X86/div-rem-pairs.ll @@ -5,37 +5,37 @@ define void @decompose_illegal_srem_same_block(i32 %a, i32 %b) { ; CHECK-LABEL: @decompose_illegal_srem_same_block( ; CHECK-NEXT: [[REM:%.*]] = srem i32 %a, %b -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 %a, %b ; CHECK-NEXT: call void @foo(i32 [[REM]], i32 [[DIV]]) ; CHECK-NEXT: ret void ; %rem = srem i32 %a, %b - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b call void @foo(i32 %rem, i32 %div) ret void } define void @decompose_illegal_urem_same_block(i32 %a, i32 %b) { ; CHECK-LABEL: @decompose_illegal_urem_same_block( -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i32 %a, %b ; CHECK-NEXT: [[REM:%.*]] = urem i32 %a, %b ; CHECK-NEXT: call void @foo(i32 [[REM]], i32 [[DIV]]) ; CHECK-NEXT: ret void ; - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b %rem = urem i32 %a, %b call void @foo(i32 %rem, i32 %div) ret void } -; Hoist and optionally decompose the sdiv because it's safe and free. +; Hoist and optionally decompose the sdiv nof because it's safe and free. 
; PR31028 - https://bugs.llvm.org/show_bug.cgi?id=31028 define i32 @hoist_sdiv(i32 %a, i32 %b) { ; CHECK-LABEL: @hoist_sdiv( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[REM:%.*]] = srem i32 %a, %b -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 %a, %b ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: @@ -50,7 +50,7 @@ br i1 %cmp, label %if, label %end if: - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b br label %end end: @@ -58,13 +58,13 @@ ret i32 %ret } -; Hoist and optionally decompose the udiv because it's safe and free. +; Hoist and optionally decompose the udiv nof because it's safe and free. define i64 @hoist_udiv(i64 %a, i64 %b) { ; CHECK-LABEL: @hoist_udiv( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[REM:%.*]] = urem i64 %a, %b -; CHECK-NEXT: [[DIV:%.*]] = udiv i64 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i64 %a, %b ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[REM]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: @@ -79,7 +79,7 @@ br i1 %cmp, label %if, label %end if: - %div = udiv i64 %a, %b + %div = udiv nof i64 %a, %b br label %end end: @@ -92,7 +92,7 @@ define i16 @hoist_srem(i16 %a, i16 %b) { ; CHECK-LABEL: @hoist_srem( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[DIV:%.*]] = sdiv i16 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i16 %a, %b ; CHECK-NEXT: [[REM:%.*]] = srem i16 %a, %b ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[DIV]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end @@ -103,7 +103,7 @@ ; CHECK-NEXT: ret i16 [[RET]] ; entry: - %div = sdiv i16 %a, %b + %div = sdiv nof i16 %a, %b %cmp = icmp eq i16 %div, 42 br i1 %cmp, label %if, label %end @@ -121,7 +121,7 @@ define i8 @hoist_urem(i8 %a, i8 %b) { ; CHECK-LABEL: @hoist_urem( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[DIV:%.*]] = udiv i8 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i8 %a, %b ; CHECK-NEXT: [[REM:%.*]] = urem i8 %a, %b ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[DIV]], 42 
; CHECK-NEXT: br i1 [[CMP]], label %if, label %end @@ -132,7 +132,7 @@ ; CHECK-NEXT: ret i8 [[RET]] ; entry: - %div = udiv i8 %a, %b + %div = udiv nof i8 %a, %b %cmp = icmp eq i8 %div, 42 br i1 %cmp, label %if, label %end @@ -154,7 +154,7 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i32 %a, %b ; CHECK-NEXT: br label %end ; CHECK: end: ; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ] @@ -166,7 +166,7 @@ br i1 %cmp, label %if, label %end if: - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b br label %end end: @@ -212,7 +212,7 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, %c +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 %a, %c ; CHECK-NEXT: br label %end ; CHECK: end: ; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ] @@ -224,7 +224,7 @@ br i1 %cmp, label %if, label %end if: - %div = sdiv i32 %a, %c + %div = sdiv nof i32 %a, %c br label %end end: @@ -237,7 +237,7 @@ define i128 @dont_hoist_urem(i128 %a, i128 %b) { ; CHECK-LABEL: @dont_hoist_urem( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[DIV:%.*]] = udiv i128 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i128 %a, %b ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i128 [[DIV]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: @@ -249,7 +249,7 @@ ; CHECK-NEXT: ret i128 [[RET]] ; entry: - %div = udiv i128 %a, %b + %div = udiv nof i128 %a, %b %cmp = icmp eq i128 %div, 42 br i1 %cmp, label %if, label %end @@ -270,7 +270,7 @@ ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 %cmp, label %if, label %else ; CHECK: if: -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 %a, %b ; CHECK-NEXT: br label %end ; CHECK: else: ; CHECK-NEXT: [[REM:%.*]] = srem i32 %a, %b @@ -283,7 +283,7 @@ br i1 %cmp, 
label %if, label %else if: - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b br label %end else: Index: test/Transforms/EarlyCSE/commute.ll =================================================================== --- test/Transforms/EarlyCSE/commute.ll +++ test/Transforms/EarlyCSE/commute.ll @@ -132,7 +132,7 @@ %cmp2 = icmp sgt i8 %a, %b %m1 = select i1 %cmp1, i8 %b, i8 %a %m2 = select i1 %cmp2, i8 %a, i8 %b - %r = sdiv i8 %m1, %m2 + %r = sdiv nof i8 %m1, %m2 ret i8 %r } @@ -179,7 +179,7 @@ %cmp2 = icmp ugt i8 %b, %a %m1 = select i1 %cmp1, i8 %a, i8 %b %m2 = select i1 %cmp2, i8 %b, i8 %a - %r = udiv i8 %m1, %m2 + %r = udiv nof i8 %m1, %m2 ret i8 %r } Index: test/Transforms/GVN/PRE/local-pre.ll =================================================================== --- test/Transforms/GVN/PRE/local-pre.ll +++ test/Transforms/GVN/PRE/local-pre.ll @@ -39,7 +39,7 @@ br i1 %cmp, label %block2, label %block3 block2: - %a = sdiv i32 %p, %q + %a = sdiv nof i32 %p, %q br label %block4 block3: @@ -52,7 +52,7 @@ block4: call void @may_exit() nounwind - %b = sdiv i32 %p, %q + %b = sdiv nof i32 %p, %q ret i32 %b } @@ -66,7 +66,7 @@ br i1 %r, label %block2, label %block3 block2: - %a = sdiv i32 %p, %q + %a = sdiv nof i32 %p, %q br label %block4 block3: @@ -82,7 +82,7 @@ %phi = phi i32 [ 0, %block3 ], [ %a, %block2 ] call void @may_exit_1(i32 %phi) nounwind - %b = sdiv i32 %p, %q + %b = sdiv nof i32 %p, %q ret i32 %b } @@ -100,7 +100,7 @@ br i1 %cmp, label %block2, label %block3 block2: - %a = sdiv i32 %p, 6 + %a = sdiv nof i32 %p, 6 br label %block4 block3: @@ -113,7 +113,7 @@ block4: call void @may_exit() nounwind - %b = sdiv i32 %p, 6 + %b = sdiv nof i32 %p, 6 ret i32 %b } @@ -128,11 +128,11 @@ br i1 %cond, label %block2, label %block3 block2: - %a = sdiv i32 %p, %q + %a = sdiv nof i32 %p, %q br label %block4 block3: - %b = sdiv i32 %p, %q + %b = sdiv nof i32 %p, %q br label %block4 ; CHECK: block4: @@ -142,6 +142,6 @@ block4: call void @may_exit() nounwind - %c = sdiv i32 %p, %q + 
%c = sdiv nof i32 %p, %q ret i32 %c } Index: test/Transforms/GVN/calls-readonly.ll =================================================================== --- test/Transforms/GVN/calls-readonly.ll +++ test/Transforms/GVN/calls-readonly.ll @@ -11,7 +11,7 @@ br i1 %1, label %bb, label %bb1 bb: ; preds = %entry - %2 = sdiv i32 %x, %y ; <i32> [#uses=1] + %2 = sdiv nof i32 %x, %y ; <i32> [#uses=1] br label %bb1 bb1: ; preds = %bb, %entry @@ -30,7 +30,7 @@ ; CHECK-NEXT: %1 = icmp eq i32 %0, 0 ; CHECK-NEXT: br i1 %1, label %bb, label %bb1 ; CHECK: bb: -; CHECK-NEXT: %2 = sdiv i32 %x, %y +; CHECK-NEXT: %2 = sdiv nof i32 %x, %y ; CHECK-NEXT: br label %bb1 ; CHECK: bb1: ; CHECK-NEXT: %x_addr.0 = phi i32 [ %2, %bb ], [ %x, %entry ] Index: test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll =================================================================== --- test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll +++ test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll @@ -33,7 +33,7 @@ br i1 %cmp, label %if.end, label %lor.lhs.false lor.lhs.false: - %div = udiv i32 %4, %1 + %div = udiv nof i32 %4, %1 %rem = urem i32 %0, %div %cmp2 = icmp eq i32 %rem, 0 br i1 %cmp2, label %if.end, label %if.then @@ -48,7 +48,7 @@ br i1 %cmp6, label %if.end14, label %lor.lhs.false8 lor.lhs.false8: - %div9 = udiv i32 %4, %3 + %div9 = udiv nof i32 %4, %3 %rem10 = urem i32 %0, %div9 %cmp11 = icmp eq i32 %rem10, 0 br i1 %cmp11, label %if.end14, label %if.then13 @@ -62,7 +62,7 @@ br i1 %cmp17, label %if.end25, label %lor.lhs.false19 lor.lhs.false19: - %div20 = udiv i32 %4, %1 + %div20 = udiv nof i32 %4, %1 %rem21 = urem i32 %0, %div20 %cmp22 = icmp eq i32 %rem21, 0 br i1 %cmp22, label %if.end25, label %if.then24 Index: test/Transforms/GlobalOpt/ctor-list-opt-constexpr.ll =================================================================== --- test/Transforms/GlobalOpt/ctor-list-opt-constexpr.ll +++ test/Transforms/GlobalOpt/ctor-list-opt-constexpr.ll @@ -17,7 +17,7 @@ define internal void @init1() { entry: %tmp = 
getelementptr inbounds %struct.foo, %struct.foo* @X, i32 0, i32 0 - store i32* inttoptr (i64 sdiv (i64 ptrtoint (i32* @G to i64), i64 ptrtoint (i32* @H to i64)) to i32*), i32** %tmp, align 8 + store i32* inttoptr (i64 sdiv nof (i64 ptrtoint (i32* @G to i64), i64 ptrtoint (i32* @H to i64)) to i32*), i32** %tmp, align 8 ret void } ; CHECK-LABEL: @init1( Index: test/Transforms/IPConstantProp/PR16052.ll =================================================================== --- test/Transforms/IPConstantProp/PR16052.ll +++ test/Transforms/IPConstantProp/PR16052.ll @@ -6,7 +6,7 @@ define i64 @fn2() { entry: %conv = sext i32 undef to i64 - %div = sdiv i64 8, %conv + %div = sdiv nof i64 8, %conv %call2 = call i64 @fn1(i64 %div) ret i64 %call2 } Index: test/Transforms/IRCE/bad_expander.ll =================================================================== --- test/Transforms/IRCE/bad_expander.ll +++ test/Transforms/IRCE/bad_expander.ll @@ -69,7 +69,7 @@ br i1 %maybe_exit, label %range_check, label %exit range_check: - %div_result = udiv i64 %num, %denom + %div_result = udiv nof i64 %num, %denom %rc = icmp slt i64 %iv.next, %div_result br i1 %rc, label %guarded, label %exit @@ -87,7 +87,7 @@ ; CHECK-LABEL: test_03 ; CHECK: entry: ; CHECK-NEXT: %num = load i64, i64* %p1, align 4 -; CHECK-NEXT: [[DIV:%[^ ]+]] = udiv i64 %num, 13 +; CHECK-NEXT: [[DIV:%[^ ]+]] = udiv nof i64 %num, 13 ; CHECK-NEXT: [[DIV_MINUS_1:%[^ ]+]] = add i64 [[DIV]], -1 ; CHECK-NEXT: [[COMP1:%[^ ]+]] = icmp sgt i64 [[DIV_MINUS_1]], 0 ; CHECK-NEXT: %exit.mainloop.at = select i1 [[COMP1]], i64 [[DIV_MINUS_1]], i64 0 @@ -121,7 +121,7 @@ br i1 %maybe_exit, label %range_check, label %exit range_check: - %div_result = udiv i64 %num, 13 + %div_result = udiv nof i64 %num, 13 %rc = icmp slt i64 %iv.next, %div_result br i1 %rc, label %guarded, label %exit Index: test/Transforms/IndVarSimplify/2003-09-23-NotAtTop.ll =================================================================== --- 
test/Transforms/IndVarSimplify/2003-09-23-NotAtTop.ll +++ test/Transforms/IndVarSimplify/2003-09-23-NotAtTop.ll @@ -13,7 +13,7 @@ %NonIndvar = phi i32 [ 200, %0 ], [ %NonIndvarNext, %Loop ] ; <i32> [#uses=1] %Canonical = phi i32 [ 0, %0 ], [ %CanonicalNext, %Loop ] ; <i32> [#uses=2] store i32 %Canonical, i32* null - %NonIndvarNext = sdiv i32 %NonIndvar, 2 ; <i32> [#uses=1] + %NonIndvarNext = sdiv nof i32 %NonIndvar, 2 ; <i32> [#uses=1] %CanonicalNext = add i32 %Canonical, 1 ; <i32> [#uses=1] br label %Loop } Index: test/Transforms/IndVarSimplify/dangling-use.ll =================================================================== --- test/Transforms/IndVarSimplify/dangling-use.ll +++ test/Transforms/IndVarSimplify/dangling-use.ll @@ -5,8 +5,8 @@ define void @vec_inverse_5_7_vert_loop_copyseparate(i8* %x, i32 %n, i32 %rowbytes) nounwind { entry: - %tmp1 = sdiv i32 %n, 3 ; <i32> [#uses=1] - %tmp2 = sdiv i32 %rowbytes, 5 ; <i32> [#uses=2] + %tmp1 = sdiv nof i32 %n, 3 ; <i32> [#uses=1] + %tmp2 = sdiv nof i32 %rowbytes, 5 ; <i32> [#uses=2] br label %bb49 bb49: ; preds = %bb48, %entry Index: test/Transforms/IndVarSimplify/eliminate-comparison.ll =================================================================== --- test/Transforms/IndVarSimplify/eliminate-comparison.ll +++ test/Transforms/IndVarSimplify/eliminate-comparison.ll @@ -127,7 +127,7 @@ br label %forcond38 noassert: ; preds = %forbody - %tmp13 = sdiv i32 -32768, %__key6.0 + %tmp13 = sdiv nof i32 -32768, %__key6.0 %tmp2936 = shl i32 %tmp13, 24 %sext23 = shl i32 %tmp13, 24 %tmp32 = icmp eq i32 %tmp2936, %sext23 @@ -144,7 +144,7 @@ br i1 %tmp46, label %noassert68, label %unrolledend noassert68: ; preds = %forbody39 - %tmp57 = sdiv i32 -32768, %__key8.0 + %tmp57 = sdiv nof i32 -32768, %__key8.0 %sext34 = shl i32 %tmp57, 16 %sext21 = shl i32 %tmp57, 16 %tmp76 = icmp eq i32 %sext34, %sext21 @@ -178,7 +178,7 @@ br label %forcond38 noassert: ; preds = %forbody - %tmp13 = sdiv i32 -32768, %__key6.0 + %tmp13 = sdiv nof 
i32 -32768, %__key6.0 %tmp2936 = shl i32 %tmp13, 24 %sext23 = shl i32 %tmp13, 24 %tmp32 = icmp eq i32 %tmp2936, %sext23 @@ -195,7 +195,7 @@ br i1 %tmp46, label %noassert68, label %unrolledend noassert68: ; preds = %forbody39 - %tmp57 = sdiv i32 -32768, %__key8.0 + %tmp57 = sdiv nof i32 -32768, %__key8.0 %sext34 = shl i32 %tmp57, 16 %sext21 = shl i32 %tmp57, 16 %tmp76 = icmp ne i32 %sext34, %sext21 Index: test/Transforms/IndVarSimplify/exit_value_test2.ll =================================================================== --- test/Transforms/IndVarSimplify/exit_value_test2.ll +++ test/Transforms/IndVarSimplify/exit_value_test2.ll @@ -2,7 +2,7 @@ ; RUN: opt < %s -indvars -loop-deletion -S | FileCheck %s ; Check IndVarSimplify should not replace exit value because or else -; udiv will be introduced by expand and the cost will be high. +; udiv nof will be introduced by expand and the cost will be high. declare void @_Z3mixRjj(i32* dereferenceable(4), i32) declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) Index: test/Transforms/IndVarSimplify/exit_value_tests.ll =================================================================== --- test/Transforms/IndVarSimplify/exit_value_tests.ll +++ test/Transforms/IndVarSimplify/exit_value_tests.ll @@ -106,7 +106,7 @@ loop: ; preds = %loop, %entry %i = phi i32 [ 4, %entry ], [ %i.next, %loop ] ; <i32> [#uses=3] %i.next = add i32 %i, 8 ; <i32> [#uses=1] - %RV = udiv i32 %i, 2 ; <i32> [#uses=1] + %RV = udiv nof i32 %i, 2 ; <i32> [#uses=1] %c = icmp ne i32 %i, 68 ; <i1> [#uses=1] br i1 %c, label %loop, label %loopexit Index: test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll =================================================================== --- test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll +++ test/Transforms/IndVarSimplify/iv-widen-elim-ext.ll @@ -22,7 +22,7 @@ ; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4 ; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP0]], [[TMP2]] ; CHECK-NEXT: [[TRUNC0:%.*]] = 
trunc i64 [[TMP1]] to i32 -; CHECK-NEXT: [[DIV0:%.*]] = udiv i32 5, [[TRUNC0]] +; CHECK-NEXT: [[DIV0:%.*]] = udiv nof i32 5, [[TRUNC0]] ; CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD3]], [[DIV0]] ; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* %A, i64 [[INDVARS_IV]] ; CHECK-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX5]], align 4 @@ -54,7 +54,7 @@ %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %idxprom1 %1 = load i32, i32* %arrayidx2, align 4 %add3 = add nsw i32 %0, %1 - %div0 = udiv i32 5, %add + %div0 = udiv nof i32 5, %add %add4 = add nsw i32 %add3, %div0 %idxprom4 = zext i32 %i.02 to i64 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4 Index: test/Transforms/IndVarSimplify/pr35406.ll =================================================================== --- test/Transforms/IndVarSimplify/pr35406.ll +++ test/Transforms/IndVarSimplify/pr35406.ll @@ -12,13 +12,13 @@ %local_2_ = phi i32 [ 63864, %entry ], [ %local_2_43, %loop2.exit ] %local_3_ = phi i32 [ 51, %entry ], [ %local_3_44, %loop2.exit ] ; CHECK-NOT: udiv - %0 = udiv i32 14, %local_0_ + %0 = udiv nof i32 14, %local_0_ %1 = icmp ugt i32 %local_0_, 14 br i1 %1, label %exit, label %general_case24 ; CHECK-LABEL: general_case24 general_case24: - %2 = udiv i32 60392, %0 + %2 = udiv nof i32 60392, %0 br i1 false, label %loop2, label %loop2.exit loop2: @@ -55,7 +55,7 @@ %local_3_ = phi i32 [ 51, %entry ], [ %local_3_44, %loop2.exit ] ; CHECK: udiv ; CHECK-NOT: udiv - %0 = udiv i32 14, %local_0_ + %0 = udiv nof i32 14, %local_0_ %1 = icmp ugt i32 %local_0_, 14 br i1 %1, label %exit, label %general_case24 Index: test/Transforms/IndVarSimplify/replace-sdiv-by-udiv.ll =================================================================== --- test/Transforms/IndVarSimplify/replace-sdiv-by-udiv.ll +++ test/Transforms/IndVarSimplify/replace-sdiv-by-udiv.ll @@ -7,9 +7,9 @@ for.body: ; preds = %entry, %for.body %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ] - %div = sdiv i32 %i.01, 2 + 
%div = sdiv nof i32 %i.01, 2 ; CHECK-NOT: sdiv -; CHECK: udiv +; CHECK: udiv nof %idxprom = sext i32 %div to i64 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom store i32 %i.01, i32* %arrayidx, align 4 @@ -28,9 +28,9 @@ for.body: ; preds = %entry, %for.body %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ] - %div = sdiv exact i32 %i.01, 2 + %div = sdiv exact nof i32 %i.01, 2 ; CHECK-NOT: sdiv -; CHECK: udiv exact +; CHECK: udiv exact nof %idxprom = sext i32 %div to i64 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom store i32 %i.01, i32* %arrayidx, align 4 @@ -50,7 +50,7 @@ for.body: ; preds = %entry, %for.body %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ] %mul = mul nsw i32 %i.01, 64 - %div = sdiv i32 %mul, %d + %div = sdiv nof i32 %mul, %d ; CHECK-NOT: udiv %idxprom = sext i32 %div to i64 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom @@ -70,8 +70,8 @@ for.body: ; preds = %entry, %for.body %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ] - %div = sdiv i32 2048, %i.01 -; CHECK: udiv + %div = sdiv nof i32 2048, %i.01 +; CHECK: udiv nof ; CHECK-NOT: sdiv %idxprom = sext i32 %div to i64 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom @@ -92,8 +92,8 @@ for.body: ; preds = %entry, %for.body %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ] %mul = mul nsw i32 %i.01, 64 - %div = sdiv i32 %mul, 8 -; CHECK: udiv + %div = sdiv nof i32 %mul, 8 +; CHECK: udiv nof ; CHECK-NOT: sdiv %idxprom = sext i32 %div to i64 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom @@ -114,8 +114,8 @@ for.body: ; preds = %entry, %for.body %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ] %mul = mul nsw i32 %i.01, 64 - %div = sdiv i32 %mul, 6 -; CHECK: udiv + %div = sdiv nof i32 %mul, 6 +; CHECK: udiv nof ; CHECK-NOT: sdiv %idxprom = sext i32 %div to i64 %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom Index: test/Transforms/IndVarSimplify/sink-trapping.ll 
=================================================================== --- test/Transforms/IndVarSimplify/sink-trapping.ll +++ test/Transforms/IndVarSimplify/sink-trapping.ll @@ -4,7 +4,7 @@ define i32 @a(i32 %x) nounwind { for.body.preheader: - %y = sdiv i32 10, %x + %y = sdiv nof i32 10, %x br label %for.body for.body: Index: test/Transforms/IndVarSimplify/udiv-invariant-but-traps.ll =================================================================== --- test/Transforms/IndVarSimplify/udiv-invariant-but-traps.ll +++ test/Transforms/IndVarSimplify/udiv-invariant-but-traps.ll @@ -12,7 +12,7 @@ bb8: %i = phi i64 [ %i.next, %bb8 ], [ 0, %bb1 ] %i.next = add i64 %i, 1 - %div = udiv i32 1, %x + %div = udiv nof i32 1, %x %c = icmp eq i64 %i.next, 6 br i1 %c, label %bb11, label %bb8 Index: test/Transforms/IndVarSimplify/udiv.ll =================================================================== --- test/Transforms/IndVarSimplify/udiv.ll +++ test/Transforms/IndVarSimplify/udiv.ll @@ -5,7 +5,7 @@ @main.flags = internal global [8193 x i8] zeroinitializer, align 1 ; <[8193 x i8]*> [#uses=5] @.str = private constant [11 x i8] c"Count: %d\0A\00" ; <[11 x i8]*> [#uses=1] -; Indvars shouldn't emit a udiv here, because there's no udiv in the +; Indvars shouldn't emit a udiv nof here, because there's no udiv nof in the ; original code. This comes from SingleSource/Benchmarks/Shootout/sieve.c. ; CHECK-LABEL: @main( @@ -127,8 +127,8 @@ declare i32 @printf(i8* nocapture, ...) nounwind -; IndVars doesn't emit a udiv in for.body.preheader since SCEVExpander::expand will -; find out there's already a udiv in the original code. +; IndVars doesn't emit a udiv nof in for.body.preheader since SCEVExpander::expand will +; find out there's already a udiv nof in the original code. 
; CHECK-LABEL: @foo( ; CHECK: for.body.preheader: @@ -136,7 +136,7 @@ define void @foo(double* %p, i64 %n) nounwind { entry: - %div0 = udiv i64 %n, 7 ; <i64> [#uses=1] + %div0 = udiv nof i64 %n, 7 ; <i64> [#uses=1] %div1 = add i64 %div0, 1 %cmp2 = icmp ult i64 0, %div1 ; <i1> [#uses=1] br i1 %cmp2, label %for.body.preheader, label %for.end @@ -149,7 +149,7 @@ %arrayidx = getelementptr inbounds double, double* %p, i64 %i.03 ; <double*> [#uses=1] store double 0.000000e+00, double* %arrayidx %inc = add i64 %i.03, 1 ; <i64> [#uses=2] - %divx = udiv i64 %n, 7 ; <i64> [#uses=1] + %divx = udiv nof i64 %n, 7 ; <i64> [#uses=1] %div = add i64 %divx, 1 %cmp = icmp ult i64 %inc, %div ; <i1> [#uses=1] br i1 %cmp, label %for.body, label %for.end.loopexit Index: test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll =================================================================== --- test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll +++ test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll @@ -148,21 +148,21 @@ bb: ; preds = %entry %3 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 4 ; <%struct.quad_struct**> [#uses=1] %4 = load %struct.quad_struct*, %struct.quad_struct** %3, align 4 ; <%struct.quad_struct*> [#uses=1] - %5 = sdiv i32 %size, 2 ; <i32> [#uses=1] + %5 = sdiv nof i32 %size, 2 ; <i32> [#uses=1] %6 = call i32 @perimeter(%struct.quad_struct* %4, i32 %5) nounwind ; <i32> [#uses=1] %7 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 5 ; <%struct.quad_struct**> [#uses=1] %8 = load %struct.quad_struct*, %struct.quad_struct** %7, align 4 ; <%struct.quad_struct*> [#uses=1] - %9 = sdiv i32 %size, 2 ; <i32> [#uses=1] + %9 = sdiv nof i32 %size, 2 ; <i32> [#uses=1] %10 = call i32 @perimeter(%struct.quad_struct* %8, i32 %9) nounwind ; <i32> [#uses=1] %11 = add i32 %10, %6 ; <i32> [#uses=1] %12 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 3 ; <%struct.quad_struct**> [#uses=1] %13 
= load %struct.quad_struct*, %struct.quad_struct** %12, align 4 ; <%struct.quad_struct*> [#uses=1] - %14 = sdiv i32 %size, 2 ; <i32> [#uses=1] + %14 = sdiv nof i32 %size, 2 ; <i32> [#uses=1] %15 = call i32 @perimeter(%struct.quad_struct* %13, i32 %14) nounwind ; <i32> [#uses=1] %16 = add i32 %15, %11 ; <i32> [#uses=1] %17 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 2 ; <%struct.quad_struct**> [#uses=1] %18 = load %struct.quad_struct*, %struct.quad_struct** %17, align 4 ; <%struct.quad_struct*> [#uses=1] - %19 = sdiv i32 %size, 2 ; <i32> [#uses=1] + %19 = sdiv nof i32 %size, 2 ; <i32> [#uses=1] %20 = call i32 @perimeter(%struct.quad_struct* %18, i32 %19) nounwind ; <i32> [#uses=1] %21 = add i32 %20, %16 ; <i32> [#uses=1] ret i32 %21 Index: test/Transforms/Inline/AArch64/binop.ll =================================================================== --- test/Transforms/Inline/AArch64/binop.ll +++ test/Transforms/Inline/AArch64/binop.ll @@ -97,7 +97,7 @@ } define i32 @div1(i32 %a, i32 %b) { - %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b call void @pad() store i32 0, i32* @glbl ret i32 %div @@ -112,7 +112,7 @@ } define i32 @div2(i32 %a) { - %div = sdiv i32 %a, %a + %div = sdiv nof i32 %a, %a call void @pad() ret i32 %div } Index: test/Transforms/Inline/ephemeral.ll =================================================================== --- test/Transforms/Inline/ephemeral.ll +++ test/Transforms/Inline/ephemeral.ll @@ -10,7 +10,7 @@ ; still happen). 
%a2 = mul i32 %a1, %a1 %a3 = sub i32 %a1, 5 - %a4 = udiv i32 %a3, -13 + %a4 = udiv nof i32 %a3, -13 %a5 = mul i32 %a4, %a4 %a6 = add i32 %a5, %a5 %ca = icmp sgt i32 %a6, -7 Index: test/Transforms/Inline/inline_constprop.ll =================================================================== --- test/Transforms/Inline/inline_constprop.ll +++ test/Transforms/Inline/inline_constprop.ll @@ -2,7 +2,7 @@ ; RUN: opt < %s -passes='cgscc(inline)' -inline-threshold=20 -S | FileCheck %s define internal i32 @callee1(i32 %A, i32 %B) { - %C = sdiv i32 %A, %B + %C = sdiv nof i32 %A, %B ret i32 %C } Index: test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll =================================================================== --- test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll +++ test/Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll @@ -2,7 +2,7 @@ define i32 @test(i1 %C, i32 %tmp.15) { %tmp.16 = select i1 %C, i32 8, i32 1 ; <i32> [#uses=1] - %tmp.18 = udiv i32 %tmp.15, %tmp.16 ; <i32> [#uses=1] + %tmp.18 = udiv nof i32 %tmp.15, %tmp.16 ; <i32> [#uses=1] ret i32 %tmp.18 } Index: test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll =================================================================== --- test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll +++ test/Transforms/InstCombine/2005-06-15-DivSelectCrash.ll @@ -2,7 +2,7 @@ define i32 @_Z13func_31585107li(i32 %l_39521025, i32 %l_59244666) { %shortcirc_val = select i1 false, i32 1, i32 0 ; <i32> [#uses=1] - %tmp.8 = udiv i32 0, %shortcirc_val ; <i32> [#uses=1] + %tmp.8 = udiv nof i32 0, %shortcirc_val ; <i32> [#uses=1] %tmp.9 = icmp eq i32 %tmp.8, 0 ; <i1> [#uses=1] %retval = select i1 %tmp.9, i32 %l_59244666, i32 -1621308501 ; <i32> [#uses=1] ret i32 %retval Index: test/Transforms/InstCombine/2005-06-16-RangeCrash.ll =================================================================== --- test/Transforms/InstCombine/2005-06-16-RangeCrash.ll +++ test/Transforms/InstCombine/2005-06-16-RangeCrash.ll @@ 
-2,7 +2,7 @@ ; PR585 define i1 @test() { - %tmp.26 = sdiv i32 0, -2147483648 ; <i32> [#uses=1] + %tmp.26 = sdiv nof i32 0, -2147483648 ; <i32> [#uses=1] %tmp.27 = icmp eq i32 %tmp.26, 0 ; <i1> [#uses=1] ret i1 %tmp.27 } Index: test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll =================================================================== --- test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll +++ test/Transforms/InstCombine/2005-07-07-DeadPHILoop.ll @@ -7,7 +7,7 @@ Dead: ; preds = %Dead %X = phi i32 [ %Y, %Dead ] ; <i32> [#uses=1] - %Y = sdiv i32 %X, 10 ; <i32> [#uses=2] + %Y = sdiv nof i32 %X, 10 ; <i32> [#uses=2] store i32 %Y, i32* %P br label %Dead } Index: test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll =================================================================== --- test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll +++ test/Transforms/InstCombine/2007-03-21-SignedRangeTest.ll @@ -9,7 +9,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[TMP6_OFF]], 11 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %tmp7 = sdiv i32 %tmp6, 12 + %tmp7 = sdiv nof i32 %tmp6, 12 icmp ne i32 %tmp7, -6 ret i1 %1 } @@ -20,7 +20,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt <2 x i32> [[TMP6_OFF]], <i32 11, i32 11> ; CHECK-NEXT: ret <2 x i1> [[TMP1]] ; - %tmp7 = sdiv <2 x i32> %tmp6, <i32 12, i32 12> + %tmp7 = sdiv nof <2 x i32> %tmp6, <i32 12, i32 12> icmp ne <2 x i32> %tmp7, <i32 -6, i32 -6> ret <2 x i1> %1 } Index: test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll =================================================================== --- test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll +++ test/Transforms/InstCombine/2007-06-21-DivCompareMiscomp.ll @@ -2,7 +2,7 @@ ; rdar://5278853 define i1 @test(i32 %tmp468) { - %tmp470 = udiv i32 %tmp468, 4 ; <i32> [#uses=2] + %tmp470 = udiv nof i32 %tmp468, 4 ; <i32> [#uses=2] %tmp475 = icmp ult i32 %tmp470, 1073741824 ; <i1> [#uses=1] ret i1 %tmp475 } Index: 
test/Transforms/InstCombine/2008-02-16-SDivOverflow2.ll =================================================================== --- test/Transforms/InstCombine/2008-02-16-SDivOverflow2.ll +++ test/Transforms/InstCombine/2008-02-16-SDivOverflow2.ll @@ -1,9 +1,9 @@ -; RUN: opt < %s -instcombine -S | grep "sdiv i8 \%a, 9" +; RUN: opt < %s -instcombine -S | grep "sdiv nof i8 \%a, 9" ; PR2048 define i8 @i(i8 %a) { - %tmp1 = sdiv i8 %a, -3 - %tmp2 = sdiv i8 %tmp1, -3 + %tmp1 = sdiv exact nof i8 %a, -3 + %tmp2 = sdiv exact nof i8 %tmp1, -3 ret i8 %tmp2 } Index: test/Transforms/InstCombine/2008-05-22-IDivVector.ll =================================================================== --- test/Transforms/InstCombine/2008-05-22-IDivVector.ll +++ test/Transforms/InstCombine/2008-05-22-IDivVector.ll @@ -1,6 +1,6 @@ ; RUN: opt < %s -instcombine -disable-output define <3 x i8> @f(<3 x i8> %i) { - %A = sdiv <3 x i8> %i, %i + %A = sdiv nof <3 x i8> %i, %i ret <3 x i8> %A } Index: test/Transforms/InstCombine/2008-05-31-Bools.ll =================================================================== --- test/Transforms/InstCombine/2008-05-31-Bools.ll +++ test/Transforms/InstCombine/2008-05-31-Bools.ll @@ -14,11 +14,11 @@ } define i1 @foo3(i1 %a, i1 %b) { - %A = udiv i1 %a, %b + %A = udiv nof i1 %a, %b ret i1 %A } define i1 @foo4(i1 %a, i1 %b) { - %A = sdiv i1 %a, %b + %A = sdiv nof i1 %a, %b ret i1 %A } Index: test/Transforms/InstCombine/2008-07-13-DivZero.ll =================================================================== --- test/Transforms/InstCombine/2008-07-13-DivZero.ll +++ test/Transforms/InstCombine/2008-07-13-DivZero.ll @@ -2,14 +2,14 @@ ; RUN: opt < %s -instcombine -S | grep "call .*%cond" ; PR2506 -; We can simplify the operand of udiv to '8', but not the operand to the +; We can simplify the operand of udiv nof to '8', but not the operand to the ; call. If the callee never returns, we can't assume the div is reachable. 
define i32 @a(i32 %x, i32 %y) { entry: %tobool = icmp ne i32 %y, 0 ; <i1> [#uses=1] %cond = select i1 %tobool, i32 8, i32 0 ; <i32> [#uses=2] %call = call i32 @b( i32 %cond ) ; <i32> [#uses=0] - %div = udiv i32 %x, %cond ; <i32> [#uses=1] + %div = udiv nof i32 %x, %cond ; <i32> [#uses=1] ret i32 %div } Index: test/Transforms/InstCombine/2008-10-11-DivCompareFold.ll =================================================================== --- test/Transforms/InstCombine/2008-10-11-DivCompareFold.ll +++ test/Transforms/InstCombine/2008-10-11-DivCompareFold.ll @@ -2,7 +2,7 @@ ; PR2697 define i1 @x(i32 %x) nounwind { - %div = sdiv i32 %x, 65536 ; <i32> [#uses=1] + %div = sdiv nof i32 %x, 65536 ; <i32> [#uses=1] %cmp = icmp slt i32 %div, -65536 ret i1 %cmp } Index: test/Transforms/InstCombine/2008-11-27-IDivVector.ll =================================================================== --- test/Transforms/InstCombine/2008-11-27-IDivVector.ll +++ test/Transforms/InstCombine/2008-11-27-IDivVector.ll @@ -1,11 +1,11 @@ ; RUN: opt < %s -instcombine -S | not grep div define <2 x i8> @f(<2 x i8> %x) { - %A = udiv <2 x i8> %x, <i8 1, i8 1> + %A = udiv nof <2 x i8> %x, <i8 1, i8 1> ret <2 x i8> %A } define <2 x i8> @g(<2 x i8> %x) { - %A = sdiv <2 x i8> %x, <i8 1, i8 1> + %A = sdiv nof <2 x i8> %x, <i8 1, i8 1> ret <2 x i8> %A } Index: test/Transforms/InstCombine/2008-11-27-UDivNegative.ll =================================================================== --- test/Transforms/InstCombine/2008-11-27-UDivNegative.ll +++ test/Transforms/InstCombine/2008-11-27-UDivNegative.ll @@ -1,6 +1,6 @@ ; RUN: opt < %s -instcombine -S | not grep div define i8 @test(i8 %x) readnone nounwind { - %A = udiv i8 %x, 250 + %A = udiv nof i8 %x, 250 ret i8 %A } Index: test/Transforms/InstCombine/2012-08-28-udiv_ashl.ll =================================================================== --- test/Transforms/InstCombine/2012-08-28-udiv_ashl.ll +++ test/Transforms/InstCombine/2012-08-28-udiv_ashl.ll @@ -6,12 +6,12 
@@ target triple = "x86_64-apple-macosx10.8.0" ; CHECK-LABEL: @udiv400( -; CHECK: udiv i32 %x, 400 +; CHECK: udiv nof i32 %x, 400 ; CHECK: ret define i32 @udiv400(i32 %x) { entry: %div = lshr i32 %x, 2 - %div1 = udiv i32 %div, 100 + %div1 = udiv nof i32 %div, 100 ret i32 %div1 } @@ -23,12 +23,12 @@ define i32 @udiv400_no(i32 %x) { entry: %div = ashr i32 %x, 2 - %div1 = udiv i32 %div, 100 + %div1 = udiv nof i32 %div, 100 ret i32 %div1 } ; CHECK-LABEL: @sdiv400_yes( -; CHECK: udiv i32 %x, 400 +; CHECK: udiv nof i32 %x, 400 ; CHECK: ret define i32 @sdiv400_yes(i32 %x) { entry: @@ -36,22 +36,22 @@ ; The sign bits of both operands are zero (i.e. we can prove they are ; unsigned inputs), turn this into a udiv. ; Next, optimize this just like sdiv. - %div1 = sdiv i32 %div, 100 + %div1 = sdiv nof i32 %div, 100 ret i32 %div1 } ; CHECK-LABEL: @udiv_i80( -; CHECK: udiv i80 %x, 400 +; CHECK: udiv nof i80 %x, 400 ; CHECK: ret define i80 @udiv_i80(i80 %x) { %div = lshr i80 %x, 2 - %div1 = udiv i80 %div, 100 + %div1 = udiv nof i80 %div, 100 ret i80 %div1 } define i32 @no_crash_notconst_udiv(i32 %x, i32 %notconst) { %div = lshr i32 %x, %notconst - %div1 = udiv i32 %div, 100 + %div1 = udiv nof i32 %div, 100 ret i32 %div1 } Index: test/Transforms/InstCombine/apint-add.ll =================================================================== --- test/Transforms/InstCombine/apint-add.ll +++ test/Transforms/InstCombine/apint-add.ll @@ -83,17 +83,17 @@ ; CHECK-NEXT: [[XOR:%.*]] = xor i4 %x, -8 ; CHECK-NEXT: [[ZEXT:%.*]] = zext i4 [[XOR]] to i7 ; CHECK-NEXT: [[ADD:%.*]] = sext i4 %x to i7 -; CHECK-NEXT: [[MUL:%.*]] = sdiv i7 [[ZEXT]], [[ADD]] +; CHECK-NEXT: [[MUL:%.*]] = sdiv nof i7 [[ZEXT]], [[ADD]] ; CHECK-NEXT: [[TRUNC:%.*]] = trunc i7 [[MUL]] to i4 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i4 [[TRUNC]], [[XOR]] +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i4 [[TRUNC]], [[XOR]] ; CHECK-NEXT: ret i4 [[DIV]] ; %xor = xor i4 %x, -8 %zext = zext i4 %xor to i7 %add = add nsw i7 %zext, -8 - %mul = sdiv i7 
%zext, %add + %mul = sdiv nof i7 %zext, %add %trunc = trunc i7 %mul to i4 - %div = sdiv i4 %trunc, %xor + %div = sdiv nof i4 %trunc, %xor ret i4 %div } Index: test/Transforms/InstCombine/apint-div1.ll =================================================================== --- test/Transforms/InstCombine/apint-div1.ll +++ test/Transforms/InstCombine/apint-div1.ll @@ -5,18 +5,18 @@ define i33 @test1(i33 %X) { - %Y = udiv i33 %X, 4096 + %Y = udiv nof i33 %X, 4096 ret i33 %Y } define i49 @test2(i49 %X) { %tmp.0 = shl i49 4096, 17 - %Y = udiv i49 %X, %tmp.0 + %Y = udiv nof i49 %X, %tmp.0 ret i49 %Y } define i59 @test3(i59 %X, i1 %C) { %V = select i1 %C, i59 1024, i59 4096 - %R = udiv i59 %X, %V + %R = udiv nof i59 %X, %V ret i59 %R } Index: test/Transforms/InstCombine/apint-div2.ll =================================================================== --- test/Transforms/InstCombine/apint-div2.ll +++ test/Transforms/InstCombine/apint-div2.ll @@ -5,18 +5,18 @@ define i333 @test1(i333 %X) { - %Y = udiv i333 %X, 70368744177664 + %Y = udiv nof i333 %X, 70368744177664 ret i333 %Y } define i499 @test2(i499 %X) { %tmp.0 = shl i499 4096, 197 - %Y = udiv i499 %X, %tmp.0 + %Y = udiv nof i499 %X, %tmp.0 ret i499 %Y } define i599 @test3(i599 %X, i1 %C) { %V = select i1 %C, i599 70368744177664, i599 4096 - %R = udiv i599 %X, %V + %R = udiv nof i599 %X, %V ret i599 %R } Index: test/Transforms/InstCombine/apint-shift.ll =================================================================== --- test/Transforms/InstCombine/apint-shift.ll +++ test/Transforms/InstCombine/apint-shift.ll @@ -251,11 +251,11 @@ define <2 x i7> @shl_lshr_splat_vec(<2 x i7> %X) { ; CHECK-LABEL: @shl_lshr_splat_vec( -; CHECK-NEXT: [[DIV:%.*]] = udiv <2 x i7> %X, <i7 9, i7 9> +; CHECK-NEXT: [[DIV:%.*]] = udiv nof <2 x i7> %X, <i7 9, i7 9> ; CHECK-NEXT: [[SH1:%.*]] = shl nuw nsw <2 x i7> [[DIV]], <i7 1, i7 1> ; CHECK-NEXT: ret <2 x i7> [[SH1]] ; - %div = udiv <2 x i7> %X, <i7 9, i7 9> + %div = udiv nof <2 x i7> %X, <i7 9, 
i7 9> %sh1 = shl nuw <2 x i7> %div, <i7 3, i7 3> %sh2 = lshr exact <2 x i7> %sh1, <i7 2, i7 2> ret <2 x i7> %sh2 Index: test/Transforms/InstCombine/apint-sub.ll =================================================================== --- test/Transforms/InstCombine/apint-sub.ll +++ test/Transforms/InstCombine/apint-sub.ll @@ -151,10 +151,10 @@ define i51 @test16(i51 %A) { ; CHECK-LABEL: @test16( -; CHECK-NEXT: [[Y:%.*]] = sdiv i51 %A, -1123 +; CHECK-NEXT: [[Y:%.*]] = sdiv nof i51 %A, -1123 ; CHECK-NEXT: ret i51 [[Y]] ; - %X = sdiv i51 %A, 1123 + %X = sdiv nof i51 %A, 1123 %Y = sub i51 0, %X ret i51 %Y } @@ -164,11 +164,11 @@ define i25 @test17(i25 %Aok) { ; CHECK-LABEL: @test17( ; CHECK-NEXT: [[B:%.*]] = sub i25 0, %Aok -; CHECK-NEXT: [[C:%.*]] = sdiv i25 [[B]], 1234 +; CHECK-NEXT: [[C:%.*]] = sdiv nof i25 [[B]], 1234 ; CHECK-NEXT: ret i25 [[C]] ; %B = sub i25 0, %Aok - %C = sdiv i25 %B, 1234 + %C = sdiv nof i25 %B, 1234 ret i25 %C } Index: test/Transforms/InstCombine/compare-udiv.ll =================================================================== --- test/Transforms/InstCombine/compare-udiv.ll +++ test/Transforms/InstCombine/compare-udiv.ll @@ -6,7 +6,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i32 %d, %n ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 %n, %d + %div = udiv nof i32 %n, %d %cmp1 = icmp eq i32 %div, 0 ret i1 %cmp1 } @@ -16,7 +16,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt <2 x i32> %d, %n ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> %n, %d + %div = udiv nof <2 x i32> %n, %d %cmp1 = icmp eq <2 x i32> %div, zeroinitializer ret <2 x i1> %cmp1 } @@ -26,7 +26,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i32 %d, 64 ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 64, %d + %div = udiv nof i32 64, %d %cmp1 = icmp eq i32 %div, 0 ret i1 %cmp1 } @@ -36,7 +36,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt <2 x i32> %d, <i32 64, i32 63> ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> <i32 64, i32 63>, %d + %div = udiv nof <2 x i32> <i32 
64, i32 63>, %d %cmp1 = icmp eq <2 x i32> %div, zeroinitializer ret <2 x i1> %cmp1 } @@ -46,7 +46,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ule i32 %d, %n ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 %n, %d + %div = udiv nof i32 %n, %d %cmp1 = icmp ne i32 %div, 0 ret i1 %cmp1 } @@ -56,7 +56,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ule <2 x i32> %d, %n ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> %n, %d + %div = udiv nof <2 x i32> %n, %d %cmp1 = icmp ne <2 x i32> %div, zeroinitializer ret <2 x i1> %cmp1 } @@ -66,7 +66,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i32 %d, 65 ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 64, %d + %div = udiv nof i32 64, %d %cmp1 = icmp ne i32 %div, 0 ret i1 %cmp1 } @@ -76,7 +76,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <2 x i32> %d, <i32 65, i32 66> ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> <i32 64, i32 65>, %d + %div = udiv nof <2 x i32> <i32 64, i32 65>, %d %cmp1 = icmp ne <2 x i32> %div, zeroinitializer ret <2 x i1> %cmp1 } @@ -85,7 +85,7 @@ ; CHECK-LABEL: @test5( ; CHECK-NEXT: ret i1 true ; - %div = udiv i32 -1, %d + %div = udiv nof i32 -1, %d %cmp1 = icmp ne i32 %div, 0 ret i1 %cmp1 } @@ -94,7 +94,7 @@ ; CHECK-LABEL: @test5vec( ; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true> ; - %div = udiv <2 x i32> <i32 -1, i32 -1>, %d + %div = udiv nof <2 x i32> <i32 -1, i32 -1>, %d %cmp1 = icmp ne <2 x i32> %div, zeroinitializer ret <2 x i1> %cmp1 } @@ -104,7 +104,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i32 %d, 6 ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 5, %d + %div = udiv nof i32 5, %d %cmp1 = icmp ugt i32 %div, 0 ret i1 %cmp1 } @@ -114,17 +114,17 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <2 x i32> %d, <i32 6, i32 6> ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> <i32 5, i32 5>, %d + %div = udiv nof <2 x i32> <i32 5, i32 5>, %d %cmp1 = icmp ugt <2 x i32> %div, zeroinitializer ret <2 x i1> %cmp1 } -; (icmp ugt (udiv C1, X), C1) -> false. 
+; (icmp ugt (udiv nof C1, X), C1) -> false. define i1 @test7(i32 %d) { ; CHECK-LABEL: @test7( ; CHECK-NEXT: ret i1 false ; - %div = udiv i32 8, %d + %div = udiv nof i32 8, %d %cmp1 = icmp ugt i32 %div, 8 ret i1 %cmp1 } @@ -133,7 +133,7 @@ ; CHECK-LABEL: @test7vec( ; CHECK-NEXT: ret <2 x i1> zeroinitializer ; - %div = udiv <2 x i32> <i32 8, i32 8>, %d + %div = udiv nof <2 x i32> <i32 8, i32 8>, %d %cmp1 = icmp ugt <2 x i32> %div, <i32 8, i32 8> ret <2 x i1> %cmp1 } @@ -143,7 +143,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i32 %d, 2 ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 4, %d + %div = udiv nof i32 4, %d %cmp1 = icmp ugt i32 %div, 3 ret i1 %cmp1 } @@ -153,7 +153,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <2 x i32> %d, <i32 2, i32 2> ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> <i32 4, i32 4>, %d + %div = udiv nof <2 x i32> <i32 4, i32 4>, %d %cmp1 = icmp ugt <2 x i32> %div, <i32 3, i32 3> ret <2 x i1> %cmp1 } @@ -163,7 +163,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i32 %d, 2 ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 4, %d + %div = udiv nof i32 4, %d %cmp1 = icmp ugt i32 %div, 2 ret i1 %cmp1 } @@ -173,7 +173,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <2 x i32> %d, <i32 2, i32 2> ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> <i32 4, i32 4>, %d + %div = udiv nof <2 x i32> <i32 4, i32 4>, %d %cmp1 = icmp ugt <2 x i32> %div, <i32 2, i32 2> ret <2 x i1> %cmp1 } @@ -183,7 +183,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult i32 %d, 3 ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 4, %d + %div = udiv nof i32 4, %d %cmp1 = icmp ugt i32 %div, 1 ret i1 %cmp1 } @@ -193,7 +193,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ult <2 x i32> %d, <i32 3, i32 3> ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> <i32 4, i32 4>, %d + %div = udiv nof <2 x i32> <i32 4, i32 4>, %d %cmp1 = icmp ugt <2 x i32> %div, <i32 1, i32 1> ret <2 x i1> %cmp1 } @@ -203,7 +203,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i32 %d, 4 ; CHECK-NEXT: ret i1 
[[CMP1]] ; - %div = udiv i32 4, %d + %div = udiv nof i32 4, %d %cmp1 = icmp ult i32 %div, 1 ret i1 %cmp1 } @@ -213,7 +213,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt <2 x i32> %d, <i32 4, i32 4> ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> <i32 4, i32 4>, %d + %div = udiv nof <2 x i32> <i32 4, i32 4>, %d %cmp1 = icmp ult <2 x i32> %div, <i32 1, i32 1> ret <2 x i1> %cmp1 } @@ -223,7 +223,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i32 %d, 2 ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 4, %d + %div = udiv nof i32 4, %d %cmp1 = icmp ult i32 %div, 2 ret i1 %cmp1 } @@ -233,7 +233,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt <2 x i32> %d, <i32 2, i32 2> ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> <i32 4, i32 4>, %d + %div = udiv nof <2 x i32> <i32 4, i32 4>, %d %cmp1 = icmp ult <2 x i32> %div, <i32 2, i32 2> ret <2 x i1> %cmp1 } @@ -243,7 +243,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i32 %d, 1 ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 4, %d + %div = udiv nof i32 4, %d %cmp1 = icmp ult i32 %div, 3 ret i1 %cmp1 } @@ -253,7 +253,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt <2 x i32> %d, <i32 1, i32 1> ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> <i32 4, i32 4>, %d + %div = udiv nof <2 x i32> <i32 4, i32 4>, %d %cmp1 = icmp ult <2 x i32> %div, <i32 3, i32 3> ret <2 x i1> %cmp1 } @@ -263,7 +263,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt i32 %d, 1 ; CHECK-NEXT: ret i1 [[CMP1]] ; - %div = udiv i32 4, %d + %div = udiv nof i32 4, %d %cmp1 = icmp ult i32 %div, 4 ret i1 %cmp1 } @@ -273,7 +273,7 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ugt <2 x i32> %d, <i32 1, i32 1> ; CHECK-NEXT: ret <2 x i1> [[CMP1]] ; - %div = udiv <2 x i32> <i32 4, i32 4>, %d + %div = udiv nof <2 x i32> <i32 4, i32 4>, %d %cmp1 = icmp ult <2 x i32> %div, <i32 4, i32 4> ret <2 x i1> %cmp1 } @@ -283,7 +283,7 @@ ; CHECK-LABEL: @test15( ; CHECK-NEXT: ret i1 false ; - %div = udiv i32 4, %d + %div = udiv nof i32 4, %d %cmp1 = icmp ugt i32 %div, -1 ret i1 %cmp1 
} @@ -292,7 +292,7 @@ ; CHECK-LABEL: @test15vec( ; CHECK-NEXT: ret <2 x i1> zeroinitializer ; - %div = udiv <2 x i32> <i32 4, i32 4>, %d + %div = udiv nof <2 x i32> <i32 4, i32 4>, %d %cmp1 = icmp ugt <2 x i32> %div, <i32 -1, i32 -1> ret <2 x i1> %cmp1 } @@ -302,7 +302,7 @@ ; CHECK-LABEL: @test16( ; CHECK-NEXT: ret i1 true ; - %div = udiv i32 4, %d + %div = udiv nof i32 4, %d %cmp1 = icmp ult i32 %div, -1 ret i1 %cmp1 } @@ -311,7 +311,7 @@ ; CHECK-LABEL: @test16vec( ; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true> ; - %div = udiv <2 x i32> <i32 4, i32 4>, %d + %div = udiv nof <2 x i32> <i32 4, i32 4>, %d %cmp1 = icmp ult <2 x i32> %div, <i32 -1, i32 -1> ret <2 x i1> %cmp1 } Index: test/Transforms/InstCombine/demorgan.ll =================================================================== --- test/Transforms/InstCombine/demorgan.ll +++ test/Transforms/InstCombine/demorgan.ll @@ -250,14 +250,14 @@ ; CHECK-NEXT: [[USE2A:%.*]] = mul i8 [[NOTA]], 23 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 %B, -1 ; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], %A -; CHECK-NEXT: [[R:%.*]] = sdiv i8 [[NOTC]], [[USE2A]] +; CHECK-NEXT: [[R:%.*]] = sdiv nof i8 [[NOTC]], [[USE2A]] ; CHECK-NEXT: ret i8 [[R]] ; %nota = xor i8 %A, -1 %use2a = mul i8 %nota, 23 %c = or i8 %nota, %B %notc = xor i8 %c, -1 - %r = sdiv i8 %notc, %use2a + %r = sdiv nof i8 %notc, %use2a ret i8 %r } @@ -268,14 +268,14 @@ ; CHECK-NEXT: [[USE2B:%.*]] = mul i8 %B, 23 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 %B, -1 ; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], %A -; CHECK-NEXT: [[R:%.*]] = sdiv i8 [[NOTC]], [[USE2B]] +; CHECK-NEXT: [[R:%.*]] = sdiv nof i8 [[NOTC]], [[USE2B]] ; CHECK-NEXT: ret i8 [[R]] ; %use2b = mul i8 %B, 23 %nota = xor i8 %A, -1 %c = or i8 %nota, %B %notc = xor i8 %c, -1 - %r = sdiv i8 %notc, %use2b + %r = sdiv nof i8 %notc, %use2b ret i8 %r } @@ -287,14 +287,14 @@ ; CHECK-NEXT: [[C:%.*]] = or i8 [[NOTA]], %B ; CHECK-NEXT: [[USE2C:%.*]] = mul i8 [[C]], 23 ; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1 -; CHECK-NEXT: 
[[R:%.*]] = sdiv i8 [[NOTC]], [[USE2C]] +; CHECK-NEXT: [[R:%.*]] = sdiv nof i8 [[NOTC]], [[USE2C]] ; CHECK-NEXT: ret i8 [[R]] ; %nota = xor i8 %A, -1 %c = or i8 %nota, %B %use2c = mul i8 %c, 23 %notc = xor i8 %c, -1 - %r = sdiv i8 %notc, %use2c + %r = sdiv nof i8 %notc, %use2c ret i8 %r } @@ -307,8 +307,8 @@ ; CHECK-NEXT: [[USE2A:%.*]] = mul i8 [[NOTA]], 17 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 %B, -1 ; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], %A -; CHECK-NEXT: [[R1:%.*]] = sdiv i8 [[NOTC]], [[USE2B]] -; CHECK-NEXT: [[R2:%.*]] = sdiv i8 [[R1]], [[USE2A]] +; CHECK-NEXT: [[R1:%.*]] = sdiv nof i8 [[NOTC]], [[USE2B]] +; CHECK-NEXT: [[R2:%.*]] = sdiv nof i8 [[R1]], [[USE2A]] ; CHECK-NEXT: ret i8 [[R2]] ; %use2b = mul i8 %B, 23 @@ -316,8 +316,8 @@ %use2a = mul i8 %nota, 17 %c = or i8 %nota, %B %notc = xor i8 %c, -1 - %r1 = sdiv i8 %notc, %use2b - %r2 = sdiv i8 %r1, %use2a + %r1 = sdiv nof i8 %notc, %use2b + %r2 = sdiv nof i8 %r1, %use2a ret i8 %r2 } @@ -330,8 +330,8 @@ ; CHECK-NEXT: [[C:%.*]] = or i8 [[NOTA]], %B ; CHECK-NEXT: [[USE2C:%.*]] = mul i8 [[C]], 23 ; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1 -; CHECK-NEXT: [[R1:%.*]] = sdiv i8 [[NOTC]], [[USE2C]] -; CHECK-NEXT: [[R2:%.*]] = sdiv i8 [[R1]], [[USE2A]] +; CHECK-NEXT: [[R1:%.*]] = sdiv nof i8 [[NOTC]], [[USE2C]] +; CHECK-NEXT: [[R2:%.*]] = sdiv nof i8 [[R1]], [[USE2A]] ; CHECK-NEXT: ret i8 [[R2]] ; %nota = xor i8 %A, -1 @@ -339,8 +339,8 @@ %c = or i8 %nota, %B %use2c = mul i8 %c, 23 %notc = xor i8 %c, -1 - %r1 = sdiv i8 %notc, %use2c - %r2 = sdiv i8 %r1, %use2a + %r1 = sdiv nof i8 %notc, %use2c + %r2 = sdiv nof i8 %r1, %use2a ret i8 %r2 } @@ -353,8 +353,8 @@ ; CHECK-NEXT: [[C:%.*]] = or i8 [[NOTA]], %B ; CHECK-NEXT: [[USE2C:%.*]] = mul i8 [[C]], 23 ; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1 -; CHECK-NEXT: [[R1:%.*]] = sdiv i8 [[NOTC]], [[USE2C]] -; CHECK-NEXT: [[R2:%.*]] = sdiv i8 [[R1]], [[USE2B]] +; CHECK-NEXT: [[R1:%.*]] = sdiv nof i8 [[NOTC]], [[USE2C]] +; CHECK-NEXT: [[R2:%.*]] = sdiv nof i8 [[R1]], 
[[USE2B]] ; CHECK-NEXT: ret i8 [[R2]] ; %use2b = mul i8 %B, 23 @@ -362,8 +362,8 @@ %c = or i8 %nota, %B %use2c = mul i8 %c, 23 %notc = xor i8 %c, -1 - %r1 = sdiv i8 %notc, %use2c - %r2 = sdiv i8 %r1, %use2b + %r1 = sdiv nof i8 %notc, %use2c + %r2 = sdiv nof i8 %r1, %use2b ret i8 %r2 } Index: test/Transforms/InstCombine/div-shift-crash.ll =================================================================== --- test/Transforms/InstCombine/div-shift-crash.ll +++ test/Transforms/InstCombine/div-shift-crash.ll @@ -71,7 +71,7 @@ br i1 undef, label %safe_div_func_uint32_t_u_u.exit.i.i.i, label %cond.false.i.i.i.i cond.false.i.i.i.i: ; preds = %for.end32.i.i.i - %div.i.i.i.i = udiv i32 %conv33.i.i.i, %.sub5.i.i.i.i + %div.i.i.i.i = udiv nof i32 %conv33.i.i.i, %.sub5.i.i.i.i br label %safe_div_func_uint32_t_u_u.exit.i.i.i safe_div_func_uint32_t_u_u.exit.i.i.i: ; preds = %cond.false.i.i.i.i, %for.end32.i.i.i Index: test/Transforms/InstCombine/div-shift.ll =================================================================== --- test/Transforms/InstCombine/div-shift.ll +++ test/Transforms/InstCombine/div-shift.ll @@ -12,7 +12,7 @@ entry: %conv = zext i16 %x to i32 %s = shl i32 2, %y - %d = sdiv i32 %conv, %s + %d = sdiv nof i32 %conv, %s ret i32 %d } @@ -27,7 +27,7 @@ entry: %conv = zext <2 x i16> %x to <2 x i32> %s = shl <2 x i32> <i32 2, i32 2>, %y - %d = sdiv <2 x i32> %conv, %s + %d = sdiv nof <2 x i32> %conv, %s ret <2 x i32> %d } @@ -40,7 +40,7 @@ ; %1 = shl i32 1, %y %2 = zext i32 %1 to i64 - %3 = udiv i64 %x, %2 + %3 = udiv nof i64 %x, %2 ret i64 %3 } @@ -54,7 +54,7 @@ ; %1 = shl i32 4, %y %2 = zext i32 %1 to i64 - %3 = udiv i64 %x, %2 + %3 = udiv nof i64 %x, %2 ret i64 %3 } @@ -68,7 +68,7 @@ %1 = shl i32 1, %y %2 = icmp ult i32 %1, 32 %3 = select i1 %2, i32 32, i32 %1 - %4 = udiv i32 %x, %3 + %4 = udiv nof i32 %x, %3 ret i32 %4 } @@ -82,7 +82,7 @@ %1 = shl i32 1, %V %2 = select i1 %x, i32 32, i32 64 %3 = select i1 %y, i32 %2, i32 %1 - %4 = udiv i32 %V, %3 + %4 = udiv 
nof i32 %V, %3 ret i32 %4 } @@ -90,11 +90,11 @@ ; CHECK-LABEL: @t6( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = icmp eq i32 %x, 0 ; CHECK-NEXT: [[DIVISOR:%.*]] = select i1 [[X_IS_ZERO]], i32 1, i32 %x -; CHECK-NEXT: [[Y:%.*]] = udiv i32 %z, [[DIVISOR]] +; CHECK-NEXT: [[Y:%.*]] = udiv nof i32 %z, [[DIVISOR]] ; CHECK-NEXT: ret i32 [[Y]] ; %x_is_zero = icmp eq i32 %x, 0 %divisor = select i1 %x_is_zero, i32 1, i32 %x - %y = udiv i32 %z, %divisor + %y = udiv nof i32 %z, %divisor ret i32 %y } Index: test/Transforms/InstCombine/div.ll =================================================================== --- test/Transforms/InstCombine/div.ll +++ test/Transforms/InstCombine/div.ll @@ -7,7 +7,7 @@ ; CHECK-LABEL: @test1( ; CHECK-NEXT: ret i32 %A ; - %B = sdiv i32 %A, 1 ; <i32> [#uses=1] + %B = sdiv nof i32 %A, 1 ; <i32> [#uses=1] ret i32 %B } @@ -17,7 +17,7 @@ ; CHECK-NEXT: [[B:%.*]] = lshr i32 %A, 3 ; CHECK-NEXT: ret i32 [[B]] ; - %B = udiv i32 %A, 8 ; <i32> [#uses=1] + %B = udiv nof i32 %A, 8 ; <i32> [#uses=1] ret i32 %B } @@ -26,7 +26,7 @@ ; CHECK-LABEL: @test3( ; CHECK-NEXT: ret i32 0 ; - %B = sdiv i32 0, %A ; <i32> [#uses=1] + %B = sdiv nof i32 0, %A ; <i32> [#uses=1] ret i32 %B } @@ -36,7 +36,7 @@ ; CHECK-NEXT: [[B:%.*]] = sub i32 0, %A ; CHECK-NEXT: ret i32 [[B]] ; - %B = sdiv i32 %A, -1 ; <i32> [#uses=1] + %B = sdiv nof i32 %A, -1 ; <i32> [#uses=1] ret i32 %B } @@ -44,8 +44,8 @@ ; CHECK-LABEL: @test5( ; CHECK-NEXT: ret i32 0 ; - %B = udiv i32 %A, -16 ; <i32> [#uses=1] - %C = udiv i32 %B, -4 ; <i32> [#uses=1] + %B = udiv nof i32 %A, -16 ; <i32> [#uses=1] + %C = udiv nof i32 %B, -4 ; <i32> [#uses=1] ret i32 %C } @@ -54,7 +54,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 %A, 123 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %B = udiv i32 %A, 123 ; <i32> [#uses=1] + %B = udiv nof i32 %A, 123 ; <i32> [#uses=1] ; A < 123 %C = icmp eq i32 %B, 0 ; <i1> [#uses=1] ret i1 %C @@ -66,7 +66,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i32 [[A_OFF]], 10 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %B = udiv i32 %A, 
10 ; <i32> [#uses=1] + %B = udiv nof i32 %A, 10 ; <i32> [#uses=1] ; A >= 20 && A < 30 %C = icmp eq i32 %B, 2 ; <i1> [#uses=1] ret i1 %C @@ -78,7 +78,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <2 x i32> [[A_OFF]], <i32 10, i32 10> ; CHECK-NEXT: ret <2 x i1> [[TMP1]] ; - %B = udiv <2 x i32> %A, <i32 10, i32 10> + %B = udiv nof <2 x i32> %A, <i32 10, i32 10> %C = icmp eq <2 x i32> %B, <i32 2, i32 2> ret <2 x i1> %C } @@ -88,7 +88,7 @@ ; CHECK-NEXT: [[C:%.*]] = icmp ugt i8 %A, -11 ; CHECK-NEXT: ret i1 [[C]] ; - %B = udiv i8 %A, 123 ; <i8> [#uses=1] + %B = udiv nof i8 %A, 123 ; <i8> [#uses=1] ; A >= 246 %C = icmp eq i8 %B, 2 ; <i1> [#uses=1] ret i1 %C @@ -99,7 +99,7 @@ ; CHECK-NEXT: [[C:%.*]] = icmp ugt <2 x i8> %A, <i8 -11, i8 -11> ; CHECK-NEXT: ret <2 x i1> [[C]] ; - %B = udiv <2 x i8> %A, <i8 123, i8 123> + %B = udiv nof <2 x i8> %A, <i8 123, i8 123> %C = icmp eq <2 x i8> %B, <i8 2, i8 2> ret <2 x i1> %C } @@ -109,7 +109,7 @@ ; CHECK-NEXT: [[C:%.*]] = icmp ult i8 %A, -10 ; CHECK-NEXT: ret i1 [[C]] ; - %B = udiv i8 %A, 123 ; <i8> [#uses=1] + %B = udiv nof i8 %A, 123 ; <i8> [#uses=1] ; A < 246 %C = icmp ne i8 %B, 2 ; <i1> [#uses=1] ret i1 %C @@ -120,7 +120,7 @@ ; CHECK-NEXT: [[C:%.*]] = icmp ult <2 x i8> %A, <i8 -10, i8 -10> ; CHECK-NEXT: ret <2 x i1> [[C]] ; - %B = udiv <2 x i8> %A, <i8 123, i8 123> + %B = udiv nof <2 x i8> %A, <i8 123, i8 123> %C = icmp ne <2 x i8> %B, <i8 2, i8 2> ret <2 x i1> %C } @@ -132,7 +132,7 @@ ; CHECK-NEXT: ret i32 [[R]] ; %V = select i1 %C, i32 64, i32 8 ; <i32> [#uses=1] - %R = udiv i32 %X, %V ; <i32> [#uses=1] + %R = udiv nof i32 %X, %V ; <i32> [#uses=1] ret i32 %R } @@ -143,7 +143,7 @@ ; CHECK-NEXT: ret i32 [[B]] ; %A = select i1 %C, i32 1024, i32 32 ; <i32> [#uses=1] - %B = udiv i32 %X, %A ; <i32> [#uses=1] + %B = udiv nof i32 %X, %A ; <i32> [#uses=1] ret i32 %B } @@ -152,7 +152,7 @@ ; CHECK-LABEL: @test12( ; CHECK-NEXT: ret i32 1 ; - %tmp3 = udiv i32 %x, %x ; 1 + %tmp3 = udiv nof i32 %x, %x ; 1 ret i32 %tmp3 } @@ -160,7 +160,7 @@ ; 
CHECK-LABEL: @test13( ; CHECK-NEXT: ret i32 1 ; - %tmp3 = sdiv i32 %x, %x ; 1 + %tmp3 = sdiv nof i32 %x, %x ; 1 ret i32 %tmp3 } @@ -169,7 +169,7 @@ ; CHECK-NEXT: ret i32 0 ; %zext = zext i8 %x to i32 - %div = udiv i32 %zext, 257 ; 0 + %div = udiv nof i32 %zext, 257 ; 0 ret i32 %div } @@ -182,27 +182,27 @@ ; %shl = shl i32 1, %b %div = lshr i32 %shl, 2 - %div2 = udiv i32 %a, %div + %div2 = udiv nof i32 %a, %div ret i32 %div2 } define <2 x i64> @test16(<2 x i64> %x) nounwind { ; CHECK-LABEL: @test16( -; CHECK-NEXT: [[DIV:%.*]] = udiv <2 x i64> %x, <i64 192, i64 192> +; CHECK-NEXT: [[DIV:%.*]] = udiv nof <2 x i64> %x, <i64 192, i64 192> ; CHECK-NEXT: ret <2 x i64> [[DIV]] ; %shr = lshr <2 x i64> %x, <i64 5, i64 5> - %div = udiv <2 x i64> %shr, <i64 6, i64 6> + %div = udiv nof <2 x i64> %shr, <i64 6, i64 6> ret <2 x i64> %div } define <2 x i64> @test17(<2 x i64> %x) nounwind { ; CHECK-LABEL: @test17( -; CHECK-NEXT: [[DIV:%.*]] = sdiv <2 x i64> %x, <i64 -3, i64 -4> +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof <2 x i64> %x, <i64 -3, i64 -4> ; CHECK-NEXT: ret <2 x i64> [[DIV]] ; %neg = sub nsw <2 x i64> zeroinitializer, %x - %div = sdiv <2 x i64> %neg, <i64 3, i64 4> + %div = sdiv nof <2 x i64> %neg, <i64 3, i64 4> ret <2 x i64> %div } @@ -211,7 +211,7 @@ ; CHECK-NEXT: [[DIV:%.*]] = sub <2 x i64> zeroinitializer, %x ; CHECK-NEXT: ret <2 x i64> [[DIV]] ; - %div = sdiv <2 x i64> %x, <i64 -1, i64 -1> + %div = sdiv nof <2 x i64> %x, <i64 -1, i64 -1> ret <2 x i64> %div } @@ -221,7 +221,7 @@ ; CHECK-NEXT: [[A:%.*]] = zext i1 [[TMP1]] to i32 ; CHECK-NEXT: ret i32 [[A]] ; - %A = udiv i32 1, %x + %A = udiv nof i32 1, %x ret i32 %A } @@ -231,7 +231,7 @@ ; CHECK-NEXT: [[A:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[A]] ; - %A = udiv <2 x i32> <i32 1, i32 1>, %x + %A = udiv nof <2 x i32> <i32 1, i32 1>, %x ret <2 x i32> %A } @@ -242,7 +242,7 @@ ; CHECK-NEXT: [[A:%.*]] = select i1 [[TMP2]], i32 %x, i32 0 ; CHECK-NEXT: ret i32 [[A]] ; - %A = sdiv i32 1, %x + %A = 
sdiv nof i32 1, %x ret i32 %A } @@ -253,37 +253,37 @@ ; CHECK-NEXT: [[A:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> [[X]], <2 x i32> zeroinitializer ; CHECK-NEXT: ret <2 x i32> [[A]] ; - %A = sdiv <2 x i32> <i32 1, i32 1>, %x + %A = sdiv nof <2 x i32> <i32 1, i32 1>, %x ret <2 x i32> %A } define i32 @test21(i32 %a) { ; CHECK-LABEL: @test21( -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, 3 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 %a, 3 ; CHECK-NEXT: ret i32 [[DIV]] ; %shl = shl nsw i32 %a, 2 - %div = sdiv i32 %shl, 12 + %div = sdiv nof i32 %shl, 12 ret i32 %div } define i32 @test22(i32 %a) { ; CHECK-LABEL: @test22( -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, 4 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 %a, 4 ; CHECK-NEXT: ret i32 [[DIV]] ; %mul = mul nsw i32 %a, 3 - %div = sdiv i32 %mul, 12 + %div = sdiv nof i32 %mul, 12 ret i32 %div } define i32 @test23(i32 %a) { ; CHECK-LABEL: @test23( -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 %a, 3 +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i32 %a, 3 ; CHECK-NEXT: ret i32 [[DIV]] ; %shl = shl nuw i32 %a, 2 - %div = udiv i32 %shl, 12 + %div = udiv nof i32 %shl, 12 ret i32 %div } @@ -293,7 +293,7 @@ ; CHECK-NEXT: ret i32 [[DIV]] ; %mul = mul nuw i32 %a, 3 - %div = udiv i32 %mul, 12 + %div = udiv nof i32 %mul, 12 ret i32 %div } @@ -303,7 +303,7 @@ ; CHECK-NEXT: ret i32 [[DIV]] ; %shl = shl nsw i32 %a, 2 - %div = sdiv i32 %shl, 2 + %div = sdiv nof i32 %shl, 2 ret i32 %div } @@ -313,7 +313,7 @@ ; CHECK-NEXT: ret i32 [[DIV]] ; %mul = mul nsw i32 %a, 12 - %div = sdiv i32 %mul, 3 + %div = sdiv nof i32 %mul, 3 ret i32 %div } @@ -323,7 +323,7 @@ ; CHECK-NEXT: ret i32 [[DIV]] ; %shl = shl nuw i32 %a, 2 - %div = udiv i32 %shl, 2 + %div = udiv nof i32 %shl, 2 ret i32 %div } @@ -333,7 +333,7 @@ ; CHECK-NEXT: ret i32 [[DIV]] ; %mul = mul nuw i32 %a, 36 - %div = udiv i32 %mul, 3 + %div = udiv nof i32 %mul, 3 ret i32 %div } @@ -343,7 +343,7 @@ ; CHECK-NEXT: ret i32 [[MUL_LOBIT]] ; %mul = shl nsw i32 %a, 31 - %div = sdiv i32 %mul, -2147483648 + %div = sdiv nof 
i32 %mul, -2147483648 ret i32 %div } @@ -352,7 +352,7 @@ ; CHECK-NEXT: ret i32 %a ; %mul = shl nuw i32 %a, 31 - %div = udiv i32 %mul, -2147483648 + %div = udiv nof i32 %mul, -2147483648 ret i32 %div } @@ -361,7 +361,7 @@ ; CHECK-NEXT: ret <2 x i32> zeroinitializer ; %shr = lshr <2 x i32> %x, <i32 31, i32 31> - %div = udiv <2 x i32> %shr, <i32 2147483647, i32 2147483647> + %div = udiv nof <2 x i32> %shr, <i32 2147483647, i32 2147483647> ret <2 x i32> %div } @@ -369,54 +369,54 @@ ; CHECK-LABEL: @test32( ; CHECK-NEXT: [[SHL:%.*]] = shl i32 2, %b ; CHECK-NEXT: [[DIV:%.*]] = lshr i32 [[SHL]], 2 -; CHECK-NEXT: [[DIV2:%.*]] = udiv i32 %a, [[DIV]] +; CHECK-NEXT: [[DIV2:%.*]] = udiv nof i32 %a, [[DIV]] ; CHECK-NEXT: ret i32 [[DIV2]] ; %shl = shl i32 2, %b %div = lshr i32 %shl, 2 - %div2 = udiv i32 %a, %div + %div2 = udiv nof i32 %a, %div ret i32 %div2 } define <2 x i64> @test33(<2 x i64> %x) nounwind { ; CHECK-LABEL: @test33( -; CHECK-NEXT: [[DIV:%.*]] = udiv exact <2 x i64> %x, <i64 192, i64 192> +; CHECK-NEXT: [[DIV:%.*]] = udiv exact nof <2 x i64> %x, <i64 192, i64 192> ; CHECK-NEXT: ret <2 x i64> [[DIV]] ; %shr = lshr exact <2 x i64> %x, <i64 5, i64 5> - %div = udiv exact <2 x i64> %shr, <i64 6, i64 6> + %div = udiv exact nof <2 x i64> %shr, <i64 6, i64 6> ret <2 x i64> %div } define <2 x i64> @test34(<2 x i64> %x) nounwind { ; CHECK-LABEL: @test34( -; CHECK-NEXT: [[DIV:%.*]] = sdiv exact <2 x i64> %x, <i64 -3, i64 -4> +; CHECK-NEXT: [[DIV:%.*]] = sdiv exact nof <2 x i64> %x, <i64 -3, i64 -4> ; CHECK-NEXT: ret <2 x i64> [[DIV]] ; %neg = sub nsw <2 x i64> zeroinitializer, %x - %div = sdiv exact <2 x i64> %neg, <i64 3, i64 4> + %div = sdiv exact nof <2 x i64> %neg, <i64 3, i64 4> ret <2 x i64> %div } define i32 @test35(i32 %A) { ; CHECK-LABEL: @test35( ; CHECK-NEXT: [[AND:%.*]] = and i32 %A, 2147483647 -; CHECK-NEXT: [[MUL:%.*]] = udiv exact i32 [[AND]], 2147483647 +; CHECK-NEXT: [[MUL:%.*]] = udiv exact nof i32 [[AND]], 2147483647 ; CHECK-NEXT: ret i32 [[MUL]] ; %and = 
and i32 %A, 2147483647 - %mul = sdiv exact i32 %and, 2147483647 + %mul = sdiv exact nof i32 %and, 2147483647 ret i32 %mul } define <2 x i32> @test35vec(<2 x i32> %A) { ; CHECK-LABEL: @test35vec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[A:%.*]], <i32 2147483647, i32 2147483647> -; CHECK-NEXT: [[MUL:%.*]] = udiv exact <2 x i32> [[AND]], <i32 2147483647, i32 2147483647> +; CHECK-NEXT: [[MUL:%.*]] = udiv exact nof <2 x i32> [[AND]], <i32 2147483647, i32 2147483647> ; CHECK-NEXT: ret <2 x i32> [[MUL]] ; %and = and <2 x i32> %A, <i32 2147483647, i32 2147483647> - %mul = sdiv exact <2 x i32> %and, <i32 2147483647, i32 2147483647> + %mul = sdiv exact nof <2 x i32> %and, <i32 2147483647, i32 2147483647> ret <2 x i32> %mul } @@ -428,7 +428,7 @@ ; %and = and i32 %A, 2147483647 %shl = shl nsw i32 1, %A - %mul = sdiv exact i32 %and, %shl + %mul = sdiv exact nof i32 %and, %shl ret i32 %mul } @@ -440,7 +440,7 @@ ; %and = and <2 x i32> %A, <i32 2147483647, i32 2147483647> %shl = shl nsw <2 x i32> <i32 1, i32 1>, %A - %mul = sdiv exact <2 x i32> %and, %shl + %mul = sdiv exact nof <2 x i32> %and, %shl ret <2 x i32> %mul } @@ -465,7 +465,7 @@ lor.end: ; preds = %lor.rhs, %entry %t.0 = phi i32 [ %0, %entry ], [ %mul, %lor.rhs ] - %div = sdiv i32 %t.0, 2 + %div = sdiv nof i32 %t.0, 2 ret i32 %div } @@ -473,12 +473,12 @@ define i32 @shrink(i8 %x) { ; CHECK-LABEL: @shrink( -; CHECK-NEXT: [[TMP1:%.*]] = sdiv i8 %x, 127 +; CHECK-NEXT: [[TMP1:%.*]] = sdiv nof i8 %x, 127 ; CHECK-NEXT: [[DIV:%.*]] = sext i8 [[TMP1]] to i32 ; CHECK-NEXT: ret i32 [[DIV]] ; %conv = sext i8 %x to i32 - %div = sdiv i32 %conv, 127 + %div = sdiv nof i32 %conv, 127 ret i32 %div } @@ -491,7 +491,7 @@ ; CHECK-NEXT: ret i32 [[DIV]] ; %conv = sext i8 %x to i32 - %div = sdiv i32 %conv, -128 + %div = sdiv nof i32 %conv, -128 ret i32 %div } @@ -499,12 +499,12 @@ define <3 x i32> @shrink_vec(<3 x i8> %x) { ; CHECK-LABEL: @shrink_vec( -; CHECK-NEXT: [[TMP1:%.*]] = sdiv <3 x i8> %x, <i8 127, i8 127, i8 127> +; CHECK-NEXT: 
[[TMP1:%.*]] = sdiv nof <3 x i8> %x, <i8 127, i8 127, i8 127> ; CHECK-NEXT: [[DIV:%.*]] = sext <3 x i8> [[TMP1]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[DIV]] ; %conv = sext <3 x i8> %x to <3 x i32> - %div = sdiv <3 x i32> %conv, <i32 127, i32 127, i32 127> + %div = sdiv nof <3 x i32> %conv, <i32 127, i32 127, i32 127> ret <3 x i32> %div } @@ -515,7 +515,7 @@ ; CHECK-NEXT: ret <2 x i32> [[DIV]] ; %conv = sext <2 x i8> %x to <2 x i32> - %div = sdiv <2 x i32> %conv, <i32 -128, i32 -128> + %div = sdiv nof <2 x i32> %conv, <i32 -128, i32 -128> ret <2 x i32> %div } @@ -524,11 +524,11 @@ define i32 @shrink_no(i8 %x) { ; CHECK-LABEL: @shrink_no( ; CHECK-NEXT: [[CONV:%.*]] = sext i8 %x to i32 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[CONV]], 128 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 [[CONV]], 128 ; CHECK-NEXT: ret i32 [[DIV]] ; %conv = sext i8 %x to i32 - %div = sdiv i32 %conv, 128 + %div = sdiv nof i32 %conv, 128 ret i32 %div } @@ -540,7 +540,7 @@ ; CHECK-NEXT: ret i32 0 ; %conv = sext i8 %x to i32 - %div = sdiv i32 %conv, -129 + %div = sdiv nof i32 %conv, -129 ret i32 %div } @@ -549,7 +549,7 @@ ; CHECK-NEXT: ret i32 0 ; %conv = sext i16 %x to i32 - %div = sdiv i32 %conv, 65535 + %div = sdiv nof i32 %conv, 65535 ret i32 %div } @@ -559,6 +559,6 @@ ; CHECK-NEXT: ret <2 x i8> zeroinitializer ; %neg = and <2 x i8> %x, <i8 2, i8 2> - %div = udiv <2 x i8> <i8 1, i8 1>, %neg + %div = udiv nof <2 x i8> <i8 1, i8 1>, %neg ret <2 x i8> %div } Index: test/Transforms/InstCombine/exact.ll =================================================================== --- test/Transforms/InstCombine/exact.ll +++ test/Transforms/InstCombine/exact.ll @@ -3,10 +3,10 @@ define i32 @sdiv1(i32 %x) { ; CHECK-LABEL: @sdiv1( -; CHECK-NEXT: [[Y:%.*]] = sdiv i32 %x, 8 +; CHECK-NEXT: [[Y:%.*]] = sdiv nof i32 %x, 8 ; CHECK-NEXT: ret i32 [[Y]] ; - %y = sdiv i32 %x, 8 + %y = sdiv nof i32 %x, 8 ret i32 %y } @@ -15,7 +15,7 @@ ; CHECK-NEXT: [[Y:%.*]] = ashr exact i32 %x, 3 ; CHECK-NEXT: ret i32 [[Y]] ; - %y = 
sdiv exact i32 %x, 8 + %y = sdiv exact nof i32 %x, 8 ret i32 %y } @@ -24,7 +24,7 @@ ; CHECK-NEXT: [[Y:%.*]] = ashr exact <2 x i32> %x, <i32 7, i32 7> ; CHECK-NEXT: ret <2 x i32> [[Y]] ; - %y = sdiv exact <2 x i32> %x, <i32 128, i32 128> + %y = sdiv exact nof <2 x i32> %x, <i32 128, i32 128> ret <2 x i32> %y } @@ -34,7 +34,7 @@ ; CHECK-NEXT: [[Z:%.*]] = sub i32 %x, [[Y]] ; CHECK-NEXT: ret i32 [[Z]] ; - %y = sdiv i32 %x, 3 + %y = sdiv nof i32 %x, 3 %z = mul i32 %y, 3 ret i32 %z } @@ -43,7 +43,7 @@ ; CHECK-LABEL: @sdiv4( ; CHECK-NEXT: ret i32 %x ; - %y = sdiv exact i32 %x, 3 + %y = sdiv exact nof i32 %x, 3 %z = mul i32 %y, 3 ret i32 %z } @@ -54,7 +54,7 @@ ; CHECK-NEXT: [[Z:%.*]] = sub i32 [[Y]], %x ; CHECK-NEXT: ret i32 [[Z]] ; - %y = sdiv i32 %x, 3 + %y = sdiv nof i32 %x, 3 %z = mul i32 %y, -3 ret i32 %z } @@ -64,7 +64,7 @@ ; CHECK-NEXT: [[Z:%.*]] = sub i32 0, %x ; CHECK-NEXT: ret i32 [[Z]] ; - %y = sdiv exact i32 %x, 3 + %y = sdiv exact nof i32 %x, 3 %z = mul i32 %y, -3 ret i32 %z } @@ -73,7 +73,7 @@ ; CHECK-LABEL: @udiv1( ; CHECK-NEXT: ret i32 %x ; - %y = udiv exact i32 %x, %w + %y = udiv exact nof i32 %x, %w %z = mul i32 %y, %w ret i32 %z } @@ -84,7 +84,7 @@ ; CHECK-NEXT: ret i32 [[Z]] ; %y = shl i32 1, %w - %z = udiv exact i32 %x, %y + %z = udiv exact nof i32 %x, %y ret i32 %z } @@ -179,7 +179,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i64 %X, 0 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %A = udiv exact i64 %X, 5 ; X/5 + %A = udiv exact nof i64 %X, 5 ; X/5 %B = icmp ne i64 %A, 0 ret i1 %B } @@ -189,7 +189,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <2 x i64> %X, zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[TMP1]] ; - %A = udiv exact <2 x i64> %X, <i64 5, i64 5> + %A = udiv exact nof <2 x i64> %X, <i64 5, i64 5> %B = icmp ne <2 x i64> %A, zeroinitializer ret <2 x i1> %B } @@ -199,7 +199,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 %X, 0 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %A = udiv exact i64 %X, 5 ; X/5 == 0 --> x == 0 + %A = udiv exact nof i64 %X, 5 ; X/5 == 0 --> x == 0 %B = 
icmp eq i64 %A, 0 ret i1 %B } @@ -209,7 +209,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i64> %X, zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[TMP1]] ; - %A = udiv exact <2 x i64> %X, <i64 5, i64 5> + %A = udiv exact nof <2 x i64> %X, <i64 5, i64 5> %B = icmp eq <2 x i64> %A, zeroinitializer ret <2 x i1> %B } @@ -219,7 +219,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 %X, 0 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %A = sdiv exact i64 %X, 5 ; X/5 == 0 --> x == 0 + %A = sdiv exact nof i64 %X, 5 ; X/5 == 0 --> x == 0 %B = icmp eq i64 %A, 0 ret i1 %B } @@ -229,7 +229,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i64> %X, zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[TMP1]] ; - %A = sdiv exact <2 x i64> %X, <i64 5, i64 5> + %A = sdiv exact nof <2 x i64> %X, <i64 5, i64 5> %B = icmp eq <2 x i64> %A, zeroinitializer ret <2 x i1> %B } @@ -239,7 +239,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 %X, 5 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %A = sdiv exact i64 %X, 5 ; X/5 == 1 --> x == 5 + %A = sdiv exact nof i64 %X, 5 ; X/5 == 1 --> x == 5 %B = icmp eq i64 %A, 1 ret i1 %B } @@ -249,7 +249,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i64> %X, <i64 5, i64 5> ; CHECK-NEXT: ret <2 x i1> [[TMP1]] ; - %A = sdiv exact <2 x i64> %X, <i64 5, i64 5> + %A = sdiv exact nof <2 x i64> %X, <i64 5, i64 5> %B = icmp eq <2 x i64> %A, <i64 1, i64 1> ret <2 x i1> %B } @@ -259,7 +259,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 %X, -5 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %A = sdiv exact i64 %X, 5 ; X/5 == -1 --> x == -5 + %A = sdiv exact nof i64 %X, 5 ; X/5 == -1 --> x == -5 %B = icmp eq i64 %A, -1 ret i1 %B } @@ -269,7 +269,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i64> %X, <i64 -5, i64 -5> ; CHECK-NEXT: ret <2 x i1> [[TMP1]] ; - %A = sdiv exact <2 x i64> %X, <i64 5, i64 5> + %A = sdiv exact nof <2 x i64> %X, <i64 5, i64 5> %B = icmp eq <2 x i64> %A, <i64 -1, i64 -1> ret <2 x i1> %B } @@ -279,7 +279,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 %X, 0 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %A = sdiv 
exact i64 %X, -5 ; X/-5 == 0 --> x == 0 + %A = sdiv exact nof i64 %X, -5 ; X/-5 == 0 --> x == 0 %B = icmp eq i64 %A, 0 ret i1 %B } @@ -289,7 +289,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i64> %X, zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[TMP1]] ; - %A = sdiv exact <2 x i64> %X, <i64 -5, i64 -5> + %A = sdiv exact nof <2 x i64> %X, <i64 -5, i64 -5> %B = icmp eq <2 x i64> %A, zeroinitializer ret <2 x i1> %B } @@ -299,7 +299,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 %X, -5 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %A = sdiv exact i64 %X, -5 ; X/-5 == 1 --> x == -5 + %A = sdiv exact nof i64 %X, -5 ; X/-5 == 1 --> x == -5 %B = icmp eq i64 %A, 1 ret i1 %B } @@ -309,7 +309,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i64> %X, <i64 -5, i64 -5> ; CHECK-NEXT: ret <2 x i1> [[TMP1]] ; - %A = sdiv exact <2 x i64> %X, <i64 -5, i64 -5> + %A = sdiv exact nof <2 x i64> %X, <i64 -5, i64 -5> %B = icmp eq <2 x i64> %A, <i64 1, i64 1> ret <2 x i1> %B } @@ -319,7 +319,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i64 %X, 5 ; CHECK-NEXT: ret i1 [[TMP1]] ; - %A = sdiv exact i64 %X, -5 ; X/-5 == -1 --> x == 5 + %A = sdiv exact nof i64 %X, -5 ; X/-5 == -1 --> x == 5 %B = icmp eq i64 %A, -1 ret i1 %B } @@ -329,7 +329,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i64> %X, <i64 5, i64 5> ; CHECK-NEXT: ret <2 x i1> [[TMP1]] ; - %A = sdiv exact <2 x i64> %X, <i64 -5, i64 -5> + %A = sdiv exact nof <2 x i64> %X, <i64 -5, i64 -5> %B = icmp eq <2 x i64> %A, <i64 -1, i64 -1> ret <2 x i1> %B } Index: test/Transforms/InstCombine/getelementptr.ll =================================================================== --- test/Transforms/InstCombine/getelementptr.ll +++ test/Transforms/InstCombine/getelementptr.ll @@ -843,7 +843,7 @@ define %struct.C* @test44(%struct.C* %c1, %struct.C* %c2) { %ptrtoint = ptrtoint %struct.C* %c1 to i64 %sub = sub i64 0, %ptrtoint - %shr = sdiv i64 %sub, 7 + %shr = sdiv nof i64 %sub, 7 %gep = getelementptr inbounds %struct.C, %struct.C* %c2, i64 %shr ret %struct.C* %gep @@ 
-859,7 +859,7 @@ %ptrtoint1 = ptrtoint %struct.C* %c1 to i64 %ptrtoint2 = ptrtoint %struct.C** %c2 to i64 %sub = sub i64 %ptrtoint2, %ptrtoint1 ; C2 - C1 - %shr = sdiv i64 %sub, 7 + %shr = sdiv nof i64 %sub, 7 %gep = getelementptr inbounds %struct.C, %struct.C* %c1, i64 %shr ; C1 + (C2 - C1) ret %struct.C* %gep @@ -871,14 +871,14 @@ define %struct.C* @test46(%struct.C* %c1, %struct.C* %c2, i64 %N) { %ptrtoint = ptrtoint %struct.C* %c1 to i64 %sub = sub i64 0, %ptrtoint - %sdiv = sdiv i64 %sub, %N + %sdiv = sdiv nof i64 %sub, %N %gep = getelementptr inbounds %struct.C, %struct.C* %c2, i64 %sdiv ret %struct.C* %gep ; CHECK-LABEL: @test46( ; CHECK-NEXT: [[PTRTOINT:%.*]] = ptrtoint %struct.C* %c1 to i64 ; CHECK-NEXT: [[SUB:%.*]] = sub i64 0, [[PTRTOINT]] -; CHECK-NEXT: [[SDIV:%.*]] = sdiv i64 [[SUB]], %N +; CHECK-NEXT: [[SDIV:%.*]] = sdiv nof i64 [[SUB]], %N ; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds %struct.C, %struct.C* %c2, i64 %sdiv ; CHECK-NEXT: ret %struct.C* [[GEP]] } Index: test/Transforms/InstCombine/hoist_instr.ll =================================================================== --- test/Transforms/InstCombine/hoist_instr.ll +++ test/Transforms/InstCombine/hoist_instr.ll @@ -7,12 +7,12 @@ then: ; preds = %entry ; CHECK: then: -; CHECK-NEXT: sdiv i32 +; CHECK-NEXT: sdiv nof i32 br label %endif endif: ; preds = %then, %entry %X = phi i32 [ %A, %then ], [ 15, %entry ] ; <i32> [#uses=1] - %Y = sdiv i32 %X, 42 ; <i32> [#uses=1] + %Y = sdiv nof i32 %X, 42 ; <i32> [#uses=1] ret i32 %Y } Index: test/Transforms/InstCombine/icmp-div-constant.ll =================================================================== --- test/Transforms/InstCombine/icmp-div-constant.ll +++ test/Transforms/InstCombine/icmp-div-constant.ll @@ -24,7 +24,7 @@ br i1 %tobool, label %then, label %exit then: - %div = sdiv i16 %c, -1 + %div = sdiv nof i16 %c, -1 %cmp = icmp ne i16 %div, 0 br label %exit @@ -51,7 +51,7 @@ br i1 %tobool, label %then, label %exit then: - %div = sdiv i16 %c, 0 
+ %div = sdiv nof i16 %c, 0 %cmp = icmp ne i16 %div, 0 br label %exit @@ -80,7 +80,7 @@ br i1 %tobool, label %then, label %exit then: - %div = sdiv i16 %c, 1 + %div = sdiv nof i16 %c, 1 %cmp = icmp ne i16 %div, 0 br label %exit Index: test/Transforms/InstCombine/icmp.ll =================================================================== --- test/Transforms/InstCombine/icmp.ll +++ test/Transforms/InstCombine/icmp.ll @@ -371,7 +371,7 @@ ; CHECK-NEXT: [[I4:%.*]] = icmp sgt i32 %x, 1328634634 ; CHECK-NEXT: ret i1 [[I4]] ; - %i3 = sdiv i32 %x, -1328634635 + %i3 = sdiv nof i32 %x, -1328634635 %i4 = icmp eq i32 %i3, -1 ret i1 %i4 } @@ -381,7 +381,7 @@ ; CHECK-NEXT: [[I4:%.*]] = icmp sgt <2 x i32> %x, <i32 1328634634, i32 1328634634> ; CHECK-NEXT: ret <2 x i1> [[I4]] ; - %i3 = sdiv <2 x i32> %x, <i32 -1328634635, i32 -1328634635> + %i3 = sdiv nof <2 x i32> %x, <i32 -1328634635, i32 -1328634635> %i4 = icmp eq <2 x i32> %i3, <i32 -1, i32 -1> ret <2 x i1> %i4 } @@ -710,8 +710,8 @@ ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 %X, %Y ; CHECK-NEXT: ret i1 [[C]] ; - %A = sdiv exact i32 %X, %Z - %B = sdiv exact i32 %Y, %Z + %A = sdiv exact nof i32 %X, %Z + %B = sdiv exact nof i32 %Y, %Z %C = icmp eq i32 %A, %B ret i1 %C } @@ -720,13 +720,13 @@ define i1 @PR32949(i32 %X, i32 %Y, i32 %Z) { ; CHECK-LABEL: @PR32949( -; CHECK-NEXT: [[A:%.*]] = sdiv exact i32 %X, %Z -; CHECK-NEXT: [[B:%.*]] = sdiv exact i32 %Y, %Z +; CHECK-NEXT: [[A:%.*]] = sdiv exact nof i32 %X, %Z +; CHECK-NEXT: [[B:%.*]] = sdiv exact nof i32 %Y, %Z ; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[A]], [[B]] ; CHECK-NEXT: ret i1 [[C]] ; - %A = sdiv exact i32 %X, %Z - %B = sdiv exact i32 %Y, %Z + %A = sdiv exact nof i32 %X, %Z + %B = sdiv exact nof i32 %Y, %Z %C = icmp sgt i32 %A, %B ret i1 %C } @@ -802,13 +802,13 @@ ; PR9838 define i1 @test53(i32 %a, i32 %b) { ; CHECK-LABEL: @test53( -; CHECK-NEXT: [[X:%.*]] = sdiv exact i32 %a, 30 -; CHECK-NEXT: [[Y:%.*]] = sdiv i32 %b, 30 +; CHECK-NEXT: [[X:%.*]] = sdiv exact nof i32 %a, 30 +; 
CHECK-NEXT: [[Y:%.*]] = sdiv nof i32 %b, 30 ; CHECK-NEXT: [[Z:%.*]] = icmp eq i32 [[X]], [[Y]] ; CHECK-NEXT: ret i1 [[Z]] ; - %x = sdiv exact i32 %a, 30 - %y = sdiv i32 %b, 30 + %x = sdiv exact nof i32 %a, 30 + %y = sdiv nof i32 %b, 30 %z = icmp eq i32 %x, %y ret i1 %z } Index: test/Transforms/InstCombine/intrinsics.ll =================================================================== --- test/Transforms/InstCombine/intrinsics.ll +++ test/Transforms/InstCombine/intrinsics.ll @@ -594,7 +594,7 @@ ; CHECK-LABEL: @overflow_div_add( ; CHECK-NEXT: ret i1 false ; - %div = sdiv i32 %v1, 2 + %div = sdiv nof i32 %v1, 2 %t = call %ov.result.32 @llvm.sadd.with.overflow.i32(i32 %div, i32 1) %obit = extractvalue %ov.result.32 %t, 1 ret i1 %obit @@ -606,7 +606,7 @@ ; CHECK-NEXT: ret i1 false ; %a = ashr i32 %v1, 18 - %div = sdiv i32 %a, 65536 + %div = sdiv nof i32 %a, 65536 %t = call %ov.result.32 @llvm.ssub.with.overflow.i32(i32 %div, i32 1) %obit = extractvalue %ov.result.32 %t, 1 ret i1 %obit Index: test/Transforms/InstCombine/nsw.ll =================================================================== --- test/Transforms/InstCombine/nsw.ll +++ test/Transforms/InstCombine/nsw.ll @@ -2,20 +2,20 @@ ; CHECK-LABEL: @sub1( ; CHECK: %y = sub i32 0, %x -; CHECK: %z = sdiv i32 %y, 337 +; CHECK: %z = sdiv nof i32 %y, 337 ; CHECK: ret i32 %z define i32 @sub1(i32 %x) { %y = sub i32 0, %x - %z = sdiv i32 %y, 337 + %z = sdiv nof i32 %y, 337 ret i32 %z } ; CHECK-LABEL: @sub2( -; CHECK: %z = sdiv i32 %x, -337 +; CHECK: %z = sdiv nof i32 %x, -337 ; CHECK: ret i32 %z define i32 @sub2(i32 %x) { %y = sub nsw i32 0, %x - %z = sdiv i32 %y, 337 + %z = sdiv nof i32 %y, 337 ret i32 %z } Index: test/Transforms/InstCombine/preserve-sminmax.ll =================================================================== --- test/Transforms/InstCombine/preserve-sminmax.ll +++ test/Transforms/InstCombine/preserve-sminmax.ll @@ -1,31 +1,31 @@ ; RUN: opt < %s -instcombine -S | FileCheck %s -; Instcombine normally 
would fold the sdiv into the comparison, -; making "icmp slt i32 %h, 2", but in this case the sdiv has +; Instcombine normally would fold the sdiv nof into the comparison, +; making "icmp slt i32 %h, 2", but in this case the sdiv nof has ; another use, so it wouldn't a big win, and it would also ; obfuscate an otherise obvious smax pattern to the point where ; other analyses wouldn't recognize it. define i32 @foo(i32 %h) { - %sd = sdiv i32 %h, 2 + %sd = sdiv nof i32 %h, 2 %t = icmp slt i32 %sd, 1 %r = select i1 %t, i32 %sd, i32 1 ret i32 %r } -; CHECK: %sd = sdiv i32 %h, 2 +; CHECK: %sd = sdiv nof i32 %h, 2 ; CHECK: %t = icmp slt i32 %sd, 1 ; CHECK: %r = select i1 %t, i32 %sd, i32 1 ; CHECK: ret i32 %r define i32 @bar(i32 %h) { - %sd = sdiv i32 %h, 2 + %sd = sdiv nof i32 %h, 2 %t = icmp sgt i32 %sd, 1 %r = select i1 %t, i32 %sd, i32 1 ret i32 %r } -; CHECK: %sd = sdiv i32 %h, 2 +; CHECK: %sd = sdiv nof i32 %h, 2 ; CHECK: %t = icmp sgt i32 %sd, 1 ; CHECK: %r = select i1 %t, i32 %sd, i32 1 ; CHECK: ret i32 %r Index: test/Transforms/InstCombine/rem.ll =================================================================== --- test/Transforms/InstCombine/rem.ll +++ test/Transforms/InstCombine/rem.ll @@ -6,7 +6,7 @@ ; CHECK-NEXT: [[R:%.*]] = srem i64 %x1, %y2 ; CHECK-NEXT: ret i64 [[R]] ; - %r = sdiv i64 %x1, %y2 + %r = sdiv nof i64 %x1, %y2 %r7 = mul i64 %r, %y2 %r8 = sub i64 %x1, %r7 ret i64 %r8 @@ -17,7 +17,7 @@ ; CHECK-NEXT: [[K:%.*]] = srem <4 x i32> %t, %u ; CHECK-NEXT: ret <4 x i32> [[K]] ; - %k = sdiv <4 x i32> %t, %u + %k = sdiv nof <4 x i32> %t, %u %l = mul <4 x i32> %k, %u %m = sub <4 x i32> %t, %l ret <4 x i32> %m @@ -28,7 +28,7 @@ ; CHECK-NEXT: [[R:%.*]] = urem i64 %x1, %y2 ; CHECK-NEXT: ret i64 [[R]] ; - %r = udiv i64 %x1, %y2 + %r = udiv nof i64 %x1, %y2 %r7 = mul i64 %r, %y2 %r8 = sub i64 %x1, %r7 ret i64 %r8 @@ -74,7 +74,7 @@ ; CHECK-NEXT: [[A:%.*]] = urem i8 %x, %y ; CHECK-NEXT: ret i8 [[A]] ; - %A = udiv i8 %x, %y + %A = udiv nof i8 %x, %y %B = mul i8 %A, 
%y %C = sub i8 %x, %B ret i8 %C @@ -85,7 +85,7 @@ ; CHECK-NEXT: [[A:%.*]] = srem i8 %x, %y ; CHECK-NEXT: ret i8 [[A]] ; - %A = sdiv i8 %x, %y + %A = sdiv nof i8 %x, %y %B = mul i8 %A, %y %C = sub i8 %x, %B ret i8 %C @@ -97,7 +97,7 @@ ; CHECK-NEXT: [[C:%.*]] = sub i8 0, [[A]] ; CHECK-NEXT: ret i8 [[C]] ; - %A = udiv i8 %x, %y + %A = udiv nof i8 %x, %y %B = mul i8 %A, %y %C = sub i8 %B, %x ret i8 %C @@ -110,7 +110,7 @@ ; CHECK-NEXT: [[C:%.*]] = add i8 [[B1]], %x ; CHECK-NEXT: ret i8 [[C]] ; - %A = udiv i8 %x, 3 + %A = udiv nof i8 %x, 3 %B = mul i8 %A, -3 %C = sub i8 %x, %B ret i8 %C @@ -120,12 +120,12 @@ define i32 @sdiv_mul_sdiv(i32 %x, i32 %y) { ; CHECK-LABEL: @sdiv_mul_sdiv( -; CHECK-NEXT: [[R:%.*]] = sdiv i32 %x, %y +; CHECK-NEXT: [[R:%.*]] = sdiv nof i32 %x, %y ; CHECK-NEXT: ret i32 [[R]] ; - %div = sdiv i32 %x, %y + %div = sdiv nof i32 %x, %y %mul = mul i32 %div, %y - %r = sdiv i32 %mul, %y + %r = sdiv nof i32 %mul, %y ret i32 %r } @@ -133,12 +133,12 @@ define i32 @udiv_mul_udiv(i32 %x, i32 %y) { ; CHECK-LABEL: @udiv_mul_udiv( -; CHECK-NEXT: [[R:%.*]] = udiv i32 %x, %y +; CHECK-NEXT: [[R:%.*]] = udiv nof i32 %x, %y ; CHECK-NEXT: ret i32 [[R]] ; - %div = udiv i32 %x, %y + %div = udiv nof i32 %x, %y %mul = mul i32 %div, %y - %r = udiv i32 %mul, %y + %r = udiv nof i32 %mul, %y ret i32 %r } Index: test/Transforms/InstCombine/sdiv-1.ll =================================================================== --- test/Transforms/InstCombine/sdiv-1.ll +++ test/Transforms/InstCombine/sdiv-1.ll @@ -6,7 +6,7 @@ define i32 @a(i32 %X) nounwind readnone { entry: %0 = sub i32 0, %X - %1 = sdiv i32 %0, -3 + %1 = sdiv nof i32 %0, -3 ret i32 %1 } @@ -19,6 +19,6 @@ define i32 @c(i32 %X) nounwind readnone { entry: %0 = sub i32 0, -2147483648 - %1 = sdiv i32 %0, -3 + %1 = sdiv nof i32 %0, -3 ret i32 %1 } Index: test/Transforms/InstCombine/sdiv-2.ll =================================================================== --- test/Transforms/InstCombine/sdiv-2.ll +++ 
test/Transforms/InstCombine/sdiv-2.ll @@ -5,7 +5,7 @@ entry: %0 = icmp ne i32 %length, -1 ; <i1> [#uses=1] %iftmp.13.0 = select i1 %0, i128 0, i128 200000000 ; <i128> [#uses=2] - %1 = sdiv i128 %iftmp.13.0, 10 ; <i128> [#uses=1] + %1 = sdiv nof i128 %iftmp.13.0, 10 ; <i128> [#uses=1] br label %bb5 bb5: ; preds = %bb8, %entry Index: test/Transforms/InstCombine/select.ll =================================================================== --- test/Transforms/InstCombine/select.ll +++ test/Transforms/InstCombine/select.ll @@ -495,10 +495,10 @@ define i32 @test18(i32 %X, i32 %Y, i1 %C) { %R = select i1 %C, i32 %X, i32 0 - %V = sdiv i32 %Y, %R + %V = sdiv nof i32 %Y, %R ret i32 %V ; CHECK-LABEL: @test18( -; CHECK: %V = sdiv i32 %Y, %X +; CHECK: %V = sdiv nof i32 %Y, %X ; CHECK: ret i32 %V } @@ -1016,7 +1016,7 @@ lor.end: %t.1 = phi i32 [ 0, %entry ], [ %phitmp, %lor.rhs ] %conv6 = zext i16 %b to i32 - %div = udiv i32 %conv6, %t.1 + %div = udiv nof i32 %conv6, %t.1 %tobool8 = icmp eq i32 %div, 0 %cmp = icmp eq i32 %t.1, 0 %cmp12 = icmp ult i32 %conv2, 2 Index: test/Transforms/InstCombine/sext.ll =================================================================== --- test/Transforms/InstCombine/sext.ll +++ test/Transforms/InstCombine/sext.ll @@ -42,11 +42,11 @@ define i64 @test4(i32 %x) { ; CHECK-LABEL: @test4( -; CHECK-NEXT: [[T:%.*]] = udiv i32 %x, 3 +; CHECK-NEXT: [[T:%.*]] = udiv nof i32 %x, 3 ; CHECK-NEXT: [[S1:%.*]] = zext i32 [[T]] to i64 ; CHECK-NEXT: ret i64 [[S1]] ; - %t = udiv i32 %x, 3 + %t = udiv nof i32 %x, 3 %s = sext i32 %t to i64 ret i64 %s } Index: test/Transforms/InstCombine/shift.ll =================================================================== --- test/Transforms/InstCombine/shift.ll +++ test/Transforms/InstCombine/shift.ll @@ -227,13 +227,13 @@ ; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 6 ; CHECK-NEXT: [[B:%.*]] = and i8 [[X]], -64 ; CHECK-NEXT: [[EXTRA_USE_OF_A:%.*]] = mul nsw i8 [[A]], 5 -; CHECK-NEXT: [[R:%.*]] = sdiv i8 [[EXTRA_USE_OF_A]], 
[[B]] +; CHECK-NEXT: [[R:%.*]] = sdiv nof i8 [[EXTRA_USE_OF_A]], [[B]] ; CHECK-NEXT: ret i8 [[R]] ; %a = ashr i8 %x, 6 %b = shl i8 %a, 6 %extra_use_of_a = mul i8 %a, 5 - %r = sdiv i8 %extra_use_of_a, %b + %r = sdiv nof i8 %extra_use_of_a, %b ret i8 %r } @@ -739,7 +739,7 @@ ; %shl1 = shl i32 1, %b %shl2 = shl i32 %shl1, 2 - %div = udiv i32 %a, %shl2 + %div = udiv nof i32 %a, %shl2 ret i32 %div } @@ -756,22 +756,22 @@ define i32 @test42(i32 %a, i32 %b) nounwind { ; CHECK-LABEL: @test42( ; CHECK-NEXT: [[DIV:%.*]] = lshr exact i32 4096, %b -; CHECK-NEXT: [[DIV2:%.*]] = udiv i32 %a, [[DIV]] +; CHECK-NEXT: [[DIV2:%.*]] = udiv nof i32 %a, [[DIV]] ; CHECK-NEXT: ret i32 [[DIV2]] ; %div = lshr i32 4096, %b ; must be exact otherwise we'd divide by zero - %div2 = udiv i32 %a, %div + %div2 = udiv nof i32 %a, %div ret i32 %div2 } define <2 x i32> @test42vec(<2 x i32> %a, <2 x i32> %b) { ; CHECK-LABEL: @test42vec( ; CHECK-NEXT: [[DIV:%.*]] = lshr exact <2 x i32> <i32 4096, i32 4096>, %b -; CHECK-NEXT: [[DIV2:%.*]] = udiv <2 x i32> %a, [[DIV]] +; CHECK-NEXT: [[DIV2:%.*]] = udiv nof <2 x i32> %a, [[DIV]] ; CHECK-NEXT: ret <2 x i32> [[DIV2]] ; %div = lshr <2 x i32> <i32 4096, i32 4096>, %b ; must be exact otherwise we'd divide by zero - %div2 = udiv <2 x i32> %a, %div + %div2 = udiv nof <2 x i32> %a, %div ret <2 x i32> %div2 } @@ -782,7 +782,7 @@ ; CHECK-NEXT: ret i32 [[DIV2]] ; %div = shl i32 4096, %b ; must be exact otherwise we'd divide by zero - %div2 = udiv i32 %a, %div + %div2 = udiv nof i32 %a, %div ret i32 %div2 } Index: test/Transforms/InstCombine/sink_instruction.ll =================================================================== --- test/Transforms/InstCombine/sink_instruction.ll +++ test/Transforms/InstCombine/sink_instruction.ll @@ -6,7 +6,7 @@ define i32 @test1(i1 %C, i32 %A, i32 %B) { ; CHECK-LABEL: @test1( entry: - %tmp.2 = sdiv i32 %A, %B ; <i32> [#uses=1] + %tmp.2 = sdiv nof i32 %A, %B ; <i32> [#uses=1] %tmp.9 = add i32 %B, %A ; <i32> [#uses=1] br i1 %C, label 
%then, label %endif @@ -14,7 +14,7 @@ ret i32 %tmp.9 endif: ; preds = %entry -; CHECK: sdiv i32 +; CHECK: sdiv nof i32 ; CHECK-NEXT: ret i32 ret i32 %tmp.2 } @@ -23,7 +23,7 @@ ;; PHI use, sink divide before call. define i32 @test2(i32 %x) nounwind ssp { ; CHECK-LABEL: @test2( -; CHECK-NOT: sdiv i32 +; CHECK-NOT: sdiv nof i32 entry: br label %bb @@ -31,14 +31,14 @@ %x_addr.17 = phi i32 [ %x, %entry ], [ %x_addr.0, %bb2 ] ; <i32> [#uses=4] %i.06 = phi i32 [ 0, %entry ], [ %4, %bb2 ] ; <i32> [#uses=1] %0 = add nsw i32 %x_addr.17, 1 ; <i32> [#uses=1] - %1 = sdiv i32 %0, %x_addr.17 ; <i32> [#uses=1] + %1 = sdiv nof i32 %0, %x_addr.17 ; <i32> [#uses=1] %2 = icmp eq i32 %x_addr.17, 0 ; <i1> [#uses=1] br i1 %2, label %bb1, label %bb2 bb1: ; preds = %bb ; CHECK: bb1: ; CHECK-NEXT: add nsw i32 %x_addr.17, 1 -; CHECK-NEXT: sdiv i32 +; CHECK-NEXT: sdiv nof i32 ; CHECK-NEXT: tail call i32 @bar() %3 = tail call i32 @bar() nounwind ; <i32> [#uses=0] br label %bb2 Index: test/Transforms/InstCombine/sub.ll =================================================================== --- test/Transforms/InstCombine/sub.ll +++ test/Transforms/InstCombine/sub.ll @@ -196,10 +196,10 @@ define i32 @test16(i32 %A) { ; CHECK-LABEL: @test16( -; CHECK-NEXT: [[Y:%.*]] = sdiv i32 [[A:%.*]], -1123 +; CHECK-NEXT: [[Y:%.*]] = sdiv nof i32 [[A:%.*]], -1123 ; CHECK-NEXT: ret i32 [[Y]] ; - %X = sdiv i32 %A, 1123 + %X = sdiv nof i32 %A, 1123 %Y = sub i32 0, %X ret i32 %Y } @@ -209,11 +209,11 @@ define i32 @test17(i32 %A) { ; CHECK-LABEL: @test17( ; CHECK-NEXT: [[B:%.*]] = sub i32 0, [[A:%.*]] -; CHECK-NEXT: [[C:%.*]] = sdiv i32 [[B]], 1234 +; CHECK-NEXT: [[C:%.*]] = sdiv nof i32 [[B]], 1234 ; CHECK-NEXT: ret i32 [[C]] ; %B = sub i32 0, %A - %C = sdiv i32 %B, 1234 + %C = sdiv nof i32 %B, 1234 ret i32 %C } @@ -616,7 +616,7 @@ ; CHECK-NEXT: [[SUB:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[SUB]] ; - %div = sdiv <2 x i32> %A, <i32 -2147483648, i32 -2147483648> + %div = sdiv nof <2 x 
i32> %A, <i32 -2147483648, i32 -2147483648> %sub = sub nsw <2 x i32> zeroinitializer, %div ret <2 x i32> %sub } @@ -627,7 +627,7 @@ ; CHECK-NEXT: [[SUB:%.*]] = sext i1 [[TMP1]] to i32 ; CHECK-NEXT: ret i32 [[SUB]] ; - %div = sdiv i32 %A, -2147483648 + %div = sdiv nof i32 %A, -2147483648 %sub = sub nsw i32 0, %div ret i32 %sub } Index: test/Transforms/InstCombine/trunc-binop-ext.ll =================================================================== --- test/Transforms/InstCombine/trunc-binop-ext.ll +++ test/Transforms/InstCombine/trunc-binop-ext.ll @@ -149,12 +149,12 @@ define <2 x i16> @narrow_sext_and_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_sext_and_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = and <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = sext <2 x i16> %x16 to <2 x i32> %b = and <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -163,12 +163,12 @@ define <2 x i16> @narrow_zext_and_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_zext_and_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = and <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = zext <2 x i16> %x16 to <2 x i32> %b = and <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -177,12 +177,12 @@ define <2 x i16> @narrow_sext_or_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: 
@narrow_sext_or_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = or <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = sext <2 x i16> %x16 to <2 x i32> %b = or <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -191,12 +191,12 @@ define <2 x i16> @narrow_zext_or_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_zext_or_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = or <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = zext <2 x i16> %x16 to <2 x i32> %b = or <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -205,12 +205,12 @@ define <2 x i16> @narrow_sext_xor_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_sext_xor_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = xor <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = sext <2 x i16> %x16 to <2 x i32> %b = xor <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -219,12 +219,12 @@ define <2 x i16> @narrow_zext_xor_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_zext_xor_commute( -; 
CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = xor <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = zext <2 x i16> %x16 to <2 x i32> %b = xor <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -233,12 +233,12 @@ define <2 x i16> @narrow_sext_add_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_sext_add_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = add <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = sext <2 x i16> %x16 to <2 x i32> %b = add <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -247,12 +247,12 @@ define <2 x i16> @narrow_zext_add_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_zext_add_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = add <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = zext <2 x i16> %x16 to <2 x i32> %b = add <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -261,12 +261,12 @@ define <2 x i16> @narrow_sext_sub_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_sext_sub_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = 
sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = sub <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = sext <2 x i16> %x16 to <2 x i32> %b = sub <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -275,12 +275,12 @@ define <2 x i16> @narrow_zext_sub_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_zext_sub_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = sub <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = zext <2 x i16> %x16 to <2 x i32> %b = sub <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -289,12 +289,12 @@ define <2 x i16> @narrow_sext_mul_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_sext_mul_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = mul <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = sext <2 x i16> %x16 to <2 x i32> %b = mul <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> @@ -303,12 +303,12 @@ define <2 x i16> @narrow_zext_mul_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-LABEL: @narrow_zext_mul_commute( -; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> %y32, <i32 7, 
i32 -17> +; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> ; CHECK-NEXT: [[R:%.*]] = mul <2 x i16> [[TMP1]], %x16 ; CHECK-NEXT: ret <2 x i16> [[R]] ; - %y32op0 = sdiv <2 x i32> %y32, <i32 7, i32 -17> + %y32op0 = sdiv nof <2 x i32> %y32, <i32 7, i32 -17> %x32 = zext <2 x i16> %x16 to <2 x i32> %b = mul <2 x i32> %y32op0, %x32 %r = trunc <2 x i32> %b to <2 x i16> Index: test/Transforms/InstCombine/udiv-simplify.ll =================================================================== --- test/Transforms/InstCombine/udiv-simplify.ll +++ test/Transforms/InstCombine/udiv-simplify.ll @@ -6,7 +6,7 @@ ; CHECK-NEXT: ret i64 0 ; %y = lshr i32 %x, 1 - %r = udiv i32 %y, -1 + %r = udiv nof i32 %y, -1 %z = sext i32 %r to i64 ret i64 %z } @@ -15,7 +15,7 @@ ; CHECK-NEXT: ret i64 0 ; %y = lshr i32 %x, 31 - %r = udiv i32 %y, 3 + %r = udiv nof i32 %y, 3 %z = sext i32 %r to i64 ret i64 %z } @@ -26,24 +26,24 @@ define i64 @test1_PR2274(i32 %x, i32 %g) nounwind { ; CHECK-LABEL: @test1_PR2274( ; CHECK-NEXT: [[Y:%.*]] = lshr i32 [[X:%.*]], 30 -; CHECK-NEXT: [[R:%.*]] = udiv i32 [[Y]], [[G:%.*]] +; CHECK-NEXT: [[R:%.*]] = udiv nof i32 [[Y]], [[G:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[R]] to i64 ; CHECK-NEXT: ret i64 [[TMP1]] ; %y = lshr i32 %x, 30 - %r = udiv i32 %y, %g + %r = udiv nof i32 %y, %g %z = sext i32 %r to i64 ret i64 %z } define i64 @test2_PR2274(i32 %x, i32 %v) nounwind { ; CHECK-LABEL: @test2_PR2274( ; CHECK-NEXT: [[Y:%.*]] = lshr i32 [[X:%.*]], 31 -; CHECK-NEXT: [[R:%.*]] = udiv i32 [[Y]], [[V:%.*]] +; CHECK-NEXT: [[R:%.*]] = udiv nof i32 [[Y]], [[V:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[R]] to i64 ; CHECK-NEXT: ret i64 [[TMP1]] ; %y = lshr i32 %x, 31 - %r = udiv i32 %y, %v + %r = udiv nof i32 %y, %v %z = sext i32 %r to i64 ret i64 %z } @@ -59,7 +59,7 @@ ; CHECK-NEXT: ret i32 [[D]] ; %z = zext i1 %a to i32 - %d = udiv i32 %z, zext (i16 shl (i16 1, i16 ptrtoint ([1 x i16]* 
@b to i16)) to i32) + %d = udiv nof i32 %z, zext (i16 shl (i16 1, i16 ptrtoint ([1 x i16]* @b to i16)) to i32) ret i32 %d } @@ -70,7 +70,7 @@ ; CHECK-NEXT: store i1 false, i1* undef, align 1 ; CHECK-NEXT: ret i177 0 ; - %B5 = udiv i177 %Y, -1 + %B5 = udiv nof i177 %Y, -1 %B4 = add i177 %B5, -1 %B2 = add i177 %B4, -1 %B6 = mul i177 %B5, %B2 @@ -78,7 +78,7 @@ %B9 = xor i177 %B4, %B3 %B13 = ashr i177 %Y, %B2 %B22 = add i177 %B9, %B13 - %B1 = udiv i177 %B5, %B6 + %B1 = udiv nof i177 %B5, %B6 %C9 = icmp ult i177 %Y, %B22 store i1 %C9, i1* undef ret i177 %B1 Index: test/Transforms/InstCombine/udiv_select_to_select_shift.ll =================================================================== --- test/Transforms/InstCombine/udiv_select_to_select_shift.ll +++ test/Transforms/InstCombine/udiv_select_to_select_shift.ll @@ -2,7 +2,7 @@ ; RUN: opt < %s -instcombine -S | FileCheck %s ; Test that this transform works: -; udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2) +; udiv nof X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2) define i64 @test(i64 %X, i1 %Cond ) { ; CHECK-LABEL: @test( @@ -12,9 +12,9 @@ ; CHECK-NEXT: ret i64 [[SUM]] ; %divisor1 = select i1 %Cond, i64 16, i64 8 - %quotient1 = udiv i64 %X, %divisor1 + %quotient1 = udiv nof i64 %X, %divisor1 %divisor2 = select i1 %Cond, i64 8, i64 0 - %quotient2 = udiv i64 %X, %divisor2 + %quotient2 = udiv nof i64 %X, %divisor2 %sum = add i64 %quotient1, %quotient2 ret i64 %sum } @@ -24,13 +24,13 @@ define <2 x i32> @PR34856(<2 x i32> %t0, <2 x i32> %t1) { ; CHECK-LABEL: @PR34856( -; CHECK-NEXT: [[DIV1:%.*]] = udiv <2 x i32> %t1, <i32 -7, i32 -7> +; CHECK-NEXT: [[DIV1:%.*]] = udiv nof <2 x i32> %t1, <i32 -7, i32 -7> ; CHECK-NEXT: ret <2 x i32> [[DIV1]] ; %cmp = icmp eq <2 x i32> %t0, <i32 1, i32 1> %zext = zext <2 x i1> %cmp to <2 x i32> %neg = select <2 x i1> %cmp, <2 x i32> zeroinitializer, <2 x i32> <i32 -7, i32 -7> - %div1 = udiv <2 x i32> %t1, %neg + %div1 = udiv nof <2 x i32> %t1, %neg 
%use_cmp_again = add <2 x i32> %div1, %zext ret <2 x i32> %use_cmp_again } Index: test/Transforms/InstCombine/udivrem-change-width.ll =================================================================== --- test/Transforms/InstCombine/udivrem-change-width.ll +++ test/Transforms/InstCombine/udivrem-change-width.ll @@ -5,24 +5,24 @@ ; PR4548 define i8 @udiv_i8(i8 %a, i8 %b) { ; CHECK-LABEL: @udiv_i8( -; CHECK-NEXT: [[DIV:%.*]] = udiv i8 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i8 %a, %b ; CHECK-NEXT: ret i8 [[DIV]] ; %za = zext i8 %a to i32 %zb = zext i8 %b to i32 - %udiv = udiv i32 %za, %zb + %udiv = udiv nof i32 %za, %zb %conv3 = trunc i32 %udiv to i8 ret i8 %conv3 } define <2 x i8> @udiv_i8_vec(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: @udiv_i8_vec( -; CHECK-NEXT: [[DIV:%.*]] = udiv <2 x i8> %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof <2 x i8> %a, %b ; CHECK-NEXT: ret <2 x i8> [[DIV]] ; %za = zext <2 x i8> %a to <2 x i32> %zb = zext <2 x i8> %b to <2 x i32> - %udiv = udiv <2 x i32> %za, %zb + %udiv = udiv nof <2 x i32> %za, %zb %conv3 = trunc <2 x i32> %udiv to <2 x i8> ret <2 x i8> %conv3 } @@ -53,25 +53,25 @@ define i32 @udiv_i32(i8 %a, i8 %b) { ; CHECK-LABEL: @udiv_i32( -; CHECK-NEXT: [[DIV:%.*]] = udiv i8 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i8 %a, %b ; CHECK-NEXT: [[UDIV:%.*]] = zext i8 [[DIV]] to i32 ; CHECK-NEXT: ret i32 [[UDIV]] ; %za = zext i8 %a to i32 %zb = zext i8 %b to i32 - %udiv = udiv i32 %za, %zb + %udiv = udiv nof i32 %za, %zb ret i32 %udiv } define <2 x i32> @udiv_i32_vec(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: @udiv_i32_vec( -; CHECK-NEXT: [[DIV:%.*]] = udiv <2 x i8> %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof <2 x i8> %a, %b ; CHECK-NEXT: [[UDIV:%.*]] = zext <2 x i8> [[DIV]] to <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[UDIV]] ; %za = zext <2 x i8> %a to <2 x i32> %zb = zext <2 x i8> %b to <2 x i32> - %udiv = udiv <2 x i32> %za, %zb + %udiv = udiv nof <2 x i32> %za, %zb ret <2 x i32> %udiv } @@ -79,14 +79,14 @@ ; CHECK-LABEL: 
@udiv_i32_multiuse( ; CHECK-NEXT: [[ZA:%.*]] = zext i8 %a to i32 ; CHECK-NEXT: [[ZB:%.*]] = zext i8 %b to i32 -; CHECK-NEXT: [[UDIV:%.*]] = udiv i32 [[ZA]], [[ZB]] +; CHECK-NEXT: [[UDIV:%.*]] = udiv nof i32 [[ZA]], [[ZB]] ; CHECK-NEXT: [[EXTRA_USES:%.*]] = add nuw nsw i32 [[ZA]], [[ZB]] ; CHECK-NEXT: [[R:%.*]] = mul nuw nsw i32 [[UDIV]], [[EXTRA_USES]] ; CHECK-NEXT: ret i32 [[R]] ; %za = zext i8 %a to i32 %zb = zext i8 %b to i32 - %udiv = udiv i32 %za, %zb + %udiv = udiv nof i32 %za, %zb %extra_uses = add i32 %za, %zb %r = mul i32 %udiv, %extra_uses ret i32 %r @@ -94,13 +94,13 @@ define i32 @udiv_illegal_type(i9 %a, i9 %b) { ; CHECK-LABEL: @udiv_illegal_type( -; CHECK-NEXT: [[DIV:%.*]] = udiv i9 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i9 %a, %b ; CHECK-NEXT: [[UDIV:%.*]] = zext i9 [[DIV]] to i32 ; CHECK-NEXT: ret i32 [[UDIV]] ; %za = zext i9 %a to i32 %zb = zext i9 %b to i32 - %udiv = udiv i32 %za, %zb + %udiv = udiv nof i32 %za, %zb ret i32 %udiv } @@ -159,47 +159,47 @@ define i32 @udiv_i32_c(i8 %a) { ; CHECK-LABEL: @udiv_i32_c( -; CHECK-NEXT: [[DIV:%.*]] = udiv i8 %a, 10 +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i8 %a, 10 ; CHECK-NEXT: [[UDIV:%.*]] = zext i8 [[DIV]] to i32 ; CHECK-NEXT: ret i32 [[UDIV]] ; %za = zext i8 %a to i32 - %udiv = udiv i32 %za, 10 + %udiv = udiv nof i32 %za, 10 ret i32 %udiv } define <2 x i32> @udiv_i32_c_vec(<2 x i8> %a) { ; CHECK-LABEL: @udiv_i32_c_vec( -; CHECK-NEXT: [[TMP1:%.*]] = udiv <2 x i8> %a, <i8 10, i8 17> +; CHECK-NEXT: [[TMP1:%.*]] = udiv nof <2 x i8> %a, <i8 10, i8 17> ; CHECK-NEXT: [[UDIV:%.*]] = zext <2 x i8> [[TMP1]] to <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[UDIV]] ; %za = zext <2 x i8> %a to <2 x i32> - %udiv = udiv <2 x i32> %za, <i32 10, i32 17> + %udiv = udiv nof <2 x i32> %za, <i32 10, i32 17> ret <2 x i32> %udiv } define i32 @udiv_i32_c_multiuse(i8 %a) { ; CHECK-LABEL: @udiv_i32_c_multiuse( ; CHECK-NEXT: [[ZA:%.*]] = zext i8 %a to i32 -; CHECK-NEXT: [[UDIV:%.*]] = udiv i32 [[ZA]], 10 +; CHECK-NEXT: [[UDIV:%.*]] = 
udiv nof i32 [[ZA]], 10 ; CHECK-NEXT: [[EXTRA_USE:%.*]] = add nuw nsw i32 [[UDIV]], [[ZA]] ; CHECK-NEXT: ret i32 [[EXTRA_USE]] ; %za = zext i8 %a to i32 - %udiv = udiv i32 %za, 10 + %udiv = udiv nof i32 %za, 10 %extra_use = add i32 %za, %udiv ret i32 %extra_use } define i32 @udiv_illegal_type_c(i9 %a) { ; CHECK-LABEL: @udiv_illegal_type_c( -; CHECK-NEXT: [[DIV:%.*]] = udiv i9 %a, 10 +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i9 %a, 10 ; CHECK-NEXT: [[UDIV:%.*]] = zext i9 [[DIV]] to i32 ; CHECK-NEXT: ret i32 [[UDIV]] ; %za = zext i9 %a to i32 - %udiv = udiv i32 %za, 10 + %udiv = udiv nof i32 %za, 10 ret i32 %udiv } @@ -251,12 +251,12 @@ define i32 @udiv_c_i32(i8 %a) { ; CHECK-LABEL: @udiv_c_i32( -; CHECK-NEXT: [[TMP1:%.*]] = udiv i8 10, %a +; CHECK-NEXT: [[TMP1:%.*]] = udiv nof i8 10, %a ; CHECK-NEXT: [[UDIV:%.*]] = zext i8 [[TMP1]] to i32 ; CHECK-NEXT: ret i32 [[UDIV]] ; %za = zext i8 %a to i32 - %udiv = udiv i32 10, %za + %udiv = udiv nof i32 10, %za ret i32 %udiv } @@ -277,12 +277,12 @@ define i32 @udiv_constexpr(i8 %a) { ; CHECK-LABEL: @udiv_constexpr( -; CHECK-NEXT: [[TMP1:%.*]] = udiv i8 %a, ptrtoint ([1 x i8]* @b to i8) +; CHECK-NEXT: [[TMP1:%.*]] = udiv nof i8 %a, ptrtoint ([1 x i8]* @b to i8) ; CHECK-NEXT: [[D:%.*]] = zext i8 [[TMP1]] to i32 ; CHECK-NEXT: ret i32 [[D]] ; %za = zext i8 %a to i32 - %d = udiv i32 %za, zext (i8 ptrtoint ([1 x i8]* @b to i8) to i32) + %d = udiv nof i32 %za, zext (i8 ptrtoint ([1 x i8]* @b to i8) to i32) ret i32 %d } Index: test/Transforms/InstCombine/vec_phi_extract.ll =================================================================== --- test/Transforms/InstCombine/vec_phi_extract.ll +++ test/Transforms/InstCombine/vec_phi_extract.ll @@ -95,7 +95,7 @@ for.body: %dec43 = add <3 x i32> %input_1.addr.1, <i32 -1, i32 -1, i32 -1> %sub44 = sub <3 x i32> <i32 -1, i32 -1, i32 -1>, %dec43 - %div45 = sdiv <3 x i32> %input_2.addr.0, %sub44 + %div45 = sdiv nof <3 x i32> %input_2.addr.0, %sub44 br label %for.cond for.end: Index: 
test/Transforms/InstCombine/vec_shuffle.ll =================================================================== --- test/Transforms/InstCombine/vec_shuffle.ll +++ test/Transforms/InstCombine/vec_shuffle.ll @@ -215,7 +215,7 @@ store <4 x i16> %vecinit6, <4 x i16>* undef %tmp1 = load <4 x i16>, <4 x i16>* undef %vecinit11 = insertelement <4 x i16> undef, i16 %conv10, i32 3 - %div = udiv <4 x i16> %tmp1, %vecinit11 + %div = udiv nof <4 x i16> %tmp1, %vecinit11 store <4 x i16> %div, <4 x i16>* %tmp %tmp4 = load <4 x i16>, <4 x i16>* %tmp %tmp5 = shufflevector <4 x i16> %tmp4, <4 x i16> undef, <2 x i32> <i32 2, i32 0> Index: test/Transforms/InstSimplify/2011-02-01-Vector.ll =================================================================== --- test/Transforms/InstSimplify/2011-02-01-Vector.ll +++ test/Transforms/InstSimplify/2011-02-01-Vector.ll @@ -2,7 +2,7 @@ define <2 x i32> @sdiv(<2 x i32> %x) { ; CHECK-LABEL: @sdiv( - %div = sdiv <2 x i32> %x, <i32 1, i32 1> + %div = sdiv nof <2 x i32> %x, <i32 1, i32 1> ret <2 x i32> %div ; CHECK: ret <2 x i32> %x } Index: test/Transforms/InstSimplify/compare.ll =================================================================== --- test/Transforms/InstSimplify/compare.ll +++ test/Transforms/InstSimplify/compare.ll @@ -580,20 +580,20 @@ ; CHECK-LABEL: @udiv2( ; CHECK-NEXT: ret i1 true ; - %A = udiv exact i32 10, %Z - %B = udiv exact i32 20, %Z + %A = udiv exact nof i32 10, %Z + %B = udiv exact nof i32 20, %Z %C = icmp ult i32 %A, %B ret i1 %C } -; Exact sdiv and equality preds can simplify. +; Exact sdiv nof and equality preds can simplify. 
define i1 @sdiv_exact_equality(i32 %Z) { ; CHECK-LABEL: @sdiv_exact_equality( ; CHECK-NEXT: ret i1 false ; - %A = sdiv exact i32 10, %Z - %B = sdiv exact i32 20, %Z + %A = sdiv exact nof i32 10, %Z + %B = sdiv exact nof i32 20, %Z %C = icmp eq i32 %A, %B ret i1 %C } @@ -602,20 +602,20 @@ define i1 @sdiv_exact_not_equality(i32 %Z) { ; CHECK-LABEL: @sdiv_exact_not_equality( -; CHECK-NEXT: [[A:%.*]] = sdiv exact i32 10, %Z -; CHECK-NEXT: [[B:%.*]] = sdiv exact i32 20, %Z +; CHECK-NEXT: [[A:%.*]] = sdiv exact nof i32 10, %Z +; CHECK-NEXT: [[B:%.*]] = sdiv exact nof i32 20, %Z ; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[A]], [[B]] ; CHECK-NEXT: ret i1 [[C]] ; - %A = sdiv exact i32 10, %Z - %B = sdiv exact i32 20, %Z + %A = sdiv exact nof i32 10, %Z + %B = sdiv exact nof i32 20, %Z %C = icmp ult i32 %A, %B ret i1 %C } define i1 @udiv3(i32 %X, i32 %Y) { ; CHECK-LABEL: @udiv3( - %A = udiv i32 %X, %Y + %A = udiv nof i32 %X, %Y %C = icmp ugt i32 %A, %X ret i1 %C ; CHECK: ret i1 false @@ -623,7 +623,7 @@ define i1 @udiv4(i32 %X, i32 %Y) { ; CHECK-LABEL: @udiv4( - %A = udiv i32 %X, %Y + %A = udiv nof i32 %X, %Y %C = icmp ule i32 %A, %X ret i1 %C ; CHECK: ret i1 true @@ -632,7 +632,7 @@ ; PR11340 define i1 @udiv6(i32 %X) nounwind { ; CHECK-LABEL: @udiv6( - %A = udiv i32 1, %X + %A = udiv nof i32 1, %X %C = icmp eq i32 %A, 0 ret i1 %C ; CHECK: ret i1 %C @@ -640,7 +640,7 @@ define i1 @udiv7(i32 %X, i32 %Y) { ; CHECK-LABEL: @udiv7( - %A = udiv i32 %X, %Y + %A = udiv nof i32 %X, %Y %C = icmp ult i32 %X, %A ret i1 %C ; CHECK: ret i1 false @@ -648,7 +648,7 @@ define i1 @udiv8(i32 %X, i32 %Y) { ; CHECK-LABEL: @udiv8( - %A = udiv i32 %X, %Y + %A = udiv nof i32 %X, %Y %C = icmp uge i32 %X, %A ret i1 %C ; CHECK: ret i1 true @@ -970,34 +970,34 @@ } define i1 @icmp_sdiv_int_min(i32 %a) { - %div = sdiv i32 -2147483648, %a + %div = sdiv nof i32 -2147483648, %a %cmp = icmp ne i32 %div, -1073741824 ret i1 %cmp ; CHECK-LABEL: @icmp_sdiv_int_min -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 -2147483648, %a 
+; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 -2147483648, %a ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[DIV]], -1073741824 ; CHECK-NEXT: ret i1 [[CMP]] } define i1 @icmp_sdiv_pr20288(i64 %a) { - %div = sdiv i64 %a, -8589934592 + %div = sdiv nof i64 %a, -8589934592 %cmp = icmp ne i64 %div, 1073741824 ret i1 %cmp ; CHECK-LABEL: @icmp_sdiv_pr20288 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 %a, -8589934592 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i64 %a, -8589934592 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[DIV]], 1073741824 ; CHECK-NEXT: ret i1 [[CMP]] } define i1 @icmp_sdiv_neg1(i64 %a) { - %div = sdiv i64 %a, -1 + %div = sdiv nof i64 %a, -1 %cmp = icmp ne i64 %div, 1073741824 ret i1 %cmp ; CHECK-LABEL: @icmp_sdiv_neg1 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i64 %a, -1 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i64 %a, -1 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[DIV]], 1073741824 ; CHECK-NEXT: ret i1 [[CMP]] } Index: test/Transforms/InstSimplify/div.ll =================================================================== --- test/Transforms/InstSimplify/div.ll +++ test/Transforms/InstSimplify/div.ll @@ -6,7 +6,7 @@ ; CHECK-LABEL: @sdiv_zero_elt_vec_constfold( ; CHECK-NEXT: ret <2 x i8> undef ; - %div = sdiv <2 x i8> <i8 1, i8 2>, <i8 0, i8 -42> + %div = sdiv nof <2 x i8> <i8 1, i8 2>, <i8 0, i8 -42> ret <2 x i8> %div } @@ -14,7 +14,7 @@ ; CHECK-LABEL: @udiv_zero_elt_vec_constfold( ; CHECK-NEXT: ret <2 x i8> undef ; - %div = udiv <2 x i8> <i8 1, i8 2>, <i8 42, i8 0> + %div = udiv nof <2 x i8> <i8 1, i8 2>, <i8 42, i8 0> ret <2 x i8> %div } @@ -22,7 +22,7 @@ ; CHECK-LABEL: @sdiv_zero_elt_vec( ; CHECK-NEXT: ret <2 x i8> undef ; - %div = sdiv <2 x i8> %x, <i8 -42, i8 0> + %div = sdiv nof <2 x i8> %x, <i8 -42, i8 0> ret <2 x i8> %div } @@ -30,7 +30,7 @@ ; CHECK-LABEL: @udiv_zero_elt_vec( ; CHECK-NEXT: ret <2 x i8> undef ; - %div = udiv <2 x i8> %x, <i8 0, i8 42> + %div = udiv nof <2 x i8> %x, <i8 0, i8 42> ret <2 x i8> %div } @@ -42,7 +42,7 @@ ; CHECK-LABEL: @sdiv_bool_vec( ; CHECK-NEXT: ret <2 
x i1> %x ; - %div = sdiv <2 x i1> %x, %y + %div = sdiv nof <2 x i1> %x, %y ret <2 x i1> %div } @@ -50,7 +50,7 @@ ; CHECK-LABEL: @udiv_bool_vec( ; CHECK-NEXT: ret <2 x i1> %x ; - %div = udiv <2 x i1> %x, %y + %div = udiv nof <2 x i1> %x, %y ret <2 x i1> %div } @@ -59,18 +59,18 @@ ; CHECK-NEXT: ret i32 0 ; %and = and i32 %x, 250 - %div = udiv i32 %and, 251 + %div = udiv nof i32 %and, 251 ret i32 %div } define i32 @not_udiv_dividend_known_smaller_than_constant_divisor(i32 %x) { ; CHECK-LABEL: @not_udiv_dividend_known_smaller_than_constant_divisor( ; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 251 -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AND]], 251 +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i32 [[AND]], 251 ; CHECK-NEXT: ret i32 [[DIV]] ; %and = and i32 %x, 251 - %div = udiv i32 %and, 251 + %div = udiv nof i32 %and, 251 ret i32 %div } @@ -79,18 +79,18 @@ ; CHECK-NEXT: ret i32 0 ; %or = or i32 %x, 251 - %div = udiv i32 250, %or + %div = udiv nof i32 250, %or ret i32 %div } define i32 @not_udiv_constant_dividend_known_smaller_than_divisor(i32 %x) { ; CHECK-LABEL: @not_udiv_constant_dividend_known_smaller_than_divisor( ; CHECK-NEXT: [[OR:%.*]] = or i32 %x, 251 -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 251, [[OR]] +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i32 251, [[OR]] ; CHECK-NEXT: ret i32 [[DIV]] ; %or = or i32 %x, 251 - %div = udiv i32 251, %or + %div = udiv nof i32 251, %or ret i32 %div } @@ -100,12 +100,12 @@ ; CHECK-LABEL: @udiv_dividend_known_smaller_than_divisor( ; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 250 ; CHECK-NEXT: [[OR:%.*]] = or i32 %y, 251 -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AND]], [[OR]] +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i32 [[AND]], [[OR]] ; CHECK-NEXT: ret i32 [[DIV]] ; %and = and i32 %x, 250 %or = or i32 %y, 251 - %div = udiv i32 %and, %or + %div = udiv nof i32 %and, %or ret i32 %div } @@ -113,12 +113,12 @@ ; CHECK-LABEL: @not_udiv_dividend_known_smaller_than_divisor( ; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 251 ; CHECK-NEXT: [[OR:%.*]] = or i32 %y, 251 -; 
CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[AND]], [[OR]] +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i32 [[AND]], [[OR]] ; CHECK-NEXT: ret i32 [[DIV]] ; %and = and i32 %x, 251 %or = or i32 %y, 251 - %div = udiv i32 %and, %or + %div = udiv nof i32 %and, %or ret i32 %div } @@ -130,7 +130,7 @@ ; CHECK-NEXT: ret i32 0 ; %call = call i32 @external(), !range !0 - %urem = udiv i32 %call, 3 + %urem = udiv nof i32 %call, 3 ret i32 %urem } Index: test/Transforms/InstSimplify/exact-nsw-nuw.ll =================================================================== --- test/Transforms/InstSimplify/exact-nsw-nuw.ll +++ test/Transforms/InstSimplify/exact-nsw-nuw.ll @@ -53,8 +53,8 @@ ; CHECK-LABEL: @div1( ; CHECK-NEXT: ret i32 0 ; - %A = udiv i32 %V, -2147483648 - %B = udiv i32 %A, -2147483648 + %A = udiv nof i32 %V, -2147483648 + %B = udiv nof i32 %A, -2147483648 ret i32 %B } @@ -62,8 +62,8 @@ ; CHECK-LABEL: @div2( ; CHECK-NEXT: ret i32 0 ; - %A = sdiv i32 %V, -1 - %B = sdiv i32 %A, -2147483648 + %A = sdiv nof i32 %V, -1 + %B = sdiv nof i32 %A, -2147483648 ret i32 %B } Index: test/Transforms/InstSimplify/gep.ll =================================================================== --- test/Transforms/InstSimplify/gep.ll +++ test/Transforms/InstSimplify/gep.ll @@ -8,7 +8,7 @@ %e_ptr = ptrtoint %struct.A* %e to i64 %b_ptr = ptrtoint %struct.A* %b to i64 %sub = sub i64 %e_ptr, %b_ptr - %sdiv = sdiv exact i64 %sub, 7 + %sdiv = sdiv exact nof i64 %sub, 7 %gep = getelementptr inbounds %struct.A, %struct.A* %b, i64 %sdiv ret %struct.A* %gep ; CHECK-LABEL: @test1 @@ -39,7 +39,7 @@ define %struct.A* @test4(%struct.A* %b) { %b_ptr = ptrtoint %struct.A* %b to i64 %sub = sub i64 0, %b_ptr - %sdiv = sdiv exact i64 %sub, 7 + %sdiv = sdiv exact nof i64 %sub, 7 %gep = getelementptr inbounds %struct.A, %struct.A* %b, i64 %sdiv ret %struct.A* %gep ; CHECK-LABEL: @test4 Index: test/Transforms/InstSimplify/icmp-constant.ll =================================================================== --- 
test/Transforms/InstSimplify/icmp-constant.ll +++ test/Transforms/InstSimplify/icmp-constant.ll @@ -73,12 +73,12 @@ ret <2 x i1> %B } -;'udiv C2, x' produces [0, C2] +;'udiv nof C2, x' produces [0, C2] define i1 @udiv5(i32 %X) { ; CHECK-LABEL: @udiv5( ; CHECK-NEXT: ret i1 false ; - %A = udiv i32 123, %X + %A = udiv nof i32 123, %X %C = icmp ugt i32 %A, 124 ret i1 %C } @@ -87,17 +87,17 @@ ; CHECK-LABEL: @udiv5_vec( ; CHECK-NEXT: ret <2 x i1> zeroinitializer ; - %A = udiv <2 x i32> <i32 123, i32 123>, %X + %A = udiv nof <2 x i32> <i32 123, i32 123>, %X %C = icmp ugt <2 x i32> %A, <i32 124, i32 124> ret <2 x i1> %C } -; 'udiv x, C2' produces [0, UINT_MAX / C2] +; 'udiv nof x, C2' produces [0, UINT_MAX / C2] define i1 @udiv1(i32 %X) { ; CHECK-LABEL: @udiv1( ; CHECK-NEXT: ret i1 true ; - %A = udiv i32 %X, 1000000 + %A = udiv nof i32 %X, 1000000 %B = icmp ult i32 %A, 5000 ret i1 %B } @@ -106,17 +106,17 @@ ; CHECK-LABEL: @udiv1_vec( ; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true> ; - %A = udiv <2 x i32> %X, <i32 1000000, i32 1000000> + %A = udiv nof <2 x i32> %X, <i32 1000000, i32 1000000> %B = icmp ult <2 x i32> %A, <i32 5000, i32 5000> ret <2 x i1> %B } -; 'sdiv C2, x' produces [-|C2|, |C2|] +; 'sdiv nof C2, x' produces [-|C2|, |C2|] define i1 @compare_dividend(i32 %a) { ; CHECK-LABEL: @compare_dividend( ; CHECK-NEXT: ret i1 false ; - %div = sdiv i32 2, %a + %div = sdiv nof i32 2, %a %cmp = icmp eq i32 %div, 3 ret i1 %cmp } @@ -125,18 +125,18 @@ ; CHECK-LABEL: @compare_dividend_vec( ; CHECK-NEXT: ret <2 x i1> zeroinitializer ; - %div = sdiv <2 x i32> <i32 2, i32 2>, %a + %div = sdiv nof <2 x i32> <i32 2, i32 2>, %a %cmp = icmp eq <2 x i32> %div, <i32 3, i32 3> ret <2 x i1> %cmp } -; 'sdiv x, C2' produces [INT_MIN / C2, INT_MAX / C2] +; 'sdiv nof x, C2' produces [INT_MIN / C2, INT_MAX / C2] ; where C2 != -1 and C2 != 0 and C2 != 1 define i1 @sdiv1(i32 %X) { ; CHECK-LABEL: @sdiv1( ; CHECK-NEXT: ret i1 true ; - %A = sdiv i32 %X, 1000000 + %A = sdiv nof i32 %X, 1000000 %B = 
icmp slt i32 %A, 3000 ret i1 %B } @@ -145,7 +145,7 @@ ; CHECK-LABEL: @sdiv1_vec( ; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true> ; - %A = sdiv <2 x i32> %X, <i32 1000000, i32 1000000> + %A = sdiv nof <2 x i32> %X, <i32 1000000, i32 1000000> %B = icmp slt <2 x i32> %A, <i32 3000, i32 3000> ret <2 x i1> %B } Index: test/Transforms/InstSimplify/reassociate.ll =================================================================== --- test/Transforms/InstSimplify/reassociate.ll +++ test/Transforms/InstSimplify/reassociate.ll @@ -112,19 +112,19 @@ ; ; (no overflow X * Y) / Y -> X %mul = mul nsw i32 %x, %y - %r = sdiv i32 %mul, %y + %r = sdiv nof i32 %mul, %y ret i32 %r } define i32 @sdiv2(i32 %x, i32 %y) { ; CHECK-LABEL: @sdiv2( -; CHECK: [[DIV:%.*]] = sdiv i32 %x, %y +; CHECK: [[DIV:%.*]] = sdiv nof i32 %x, %y ; CHECK-NEXT: ret i32 [[DIV]] ; ; (((X / Y) * Y) / Y) -> X / Y - %div = sdiv i32 %x, %y + %div = sdiv nof i32 %x, %y %mul = mul i32 %div, %y - %r = sdiv i32 %mul, %y + %r = sdiv nof i32 %mul, %y ret i32 %r } @@ -134,7 +134,7 @@ ; ; (X rem Y) / Y -> 0 %rem = srem i32 %x, %y - %div = sdiv i32 %rem, %y + %div = sdiv nof i32 %rem, %y ret i32 %div } @@ -143,7 +143,7 @@ ; CHECK: ret i32 %x ; ; (X / Y) * Y -> X if the division is exact - %div = sdiv exact i32 %x, %y + %div = sdiv exact nof i32 %x, %y %mul = mul i32 %div, %y ret i32 %mul } @@ -153,7 +153,7 @@ ; CHECK: ret i32 %x ; ; Y * (X / Y) -> X if the division is exact - %div = sdiv exact i32 %x, %y + %div = sdiv exact nof i32 %x, %y %mul = mul i32 %y, %div ret i32 %mul } @@ -165,19 +165,19 @@ ; ; (no overflow X * Y) / Y -> X %mul = mul nuw i32 %x, %y - %r = udiv i32 %mul, %y + %r = udiv nof i32 %mul, %y ret i32 %r } define i32 @udiv2(i32 %x, i32 %y) { ; CHECK-LABEL: @udiv2( -; CHECK: [[DIV:%.*]] = udiv i32 %x, %y +; CHECK: [[DIV:%.*]] = udiv nof i32 %x, %y ; CHECK-NEXT: ret i32 [[DIV]] ; ; (((X / Y) * Y) / Y) -> X / Y - %div = udiv i32 %x, %y + %div = udiv nof i32 %x, %y %mul = mul i32 %div, %y - %r = udiv i32 %mul, %y + 
%r = udiv nof i32 %mul, %y ret i32 %r } @@ -187,7 +187,7 @@ ; ; (X rem Y) / Y -> 0 %rem = urem i32 %x, %y - %div = udiv i32 %rem, %y + %div = udiv nof i32 %rem, %y ret i32 %div } @@ -196,7 +196,7 @@ ; CHECK: ret i32 %x ; ; (X / Y) * Y -> X if the division is exact - %div = udiv exact i32 %x, %y + %div = udiv exact nof i32 %x, %y %mul = mul i32 %div, %y ret i32 %mul } @@ -206,7 +206,7 @@ ; CHECK: ret i32 %x ; ; Y * (X / Y) -> X if the division is exact - %div = udiv exact i32 %x, %y + %div = udiv exact nof i32 %x, %y %mul = mul i32 %y, %div ret i32 %mul } Index: test/Transforms/InstSimplify/signed-div-rem.ll =================================================================== --- test/Transforms/InstSimplify/signed-div-rem.ll +++ test/Transforms/InstSimplify/signed-div-rem.ll @@ -5,18 +5,18 @@ ; CHECK-NEXT: ret i32 0 ; %conv = sext i8 %x to i32 - %div = sdiv i32 %conv, 129 + %div = sdiv nof i32 %conv, 129 ret i32 %div } define i32 @not_sdiv_sext_big_divisor(i8 %x) { ; CHECK-LABEL: @not_sdiv_sext_big_divisor( ; CHECK-NEXT: [[CONV:%.*]] = sext i8 %x to i32 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[CONV]], 128 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 [[CONV]], 128 ; CHECK-NEXT: ret i32 [[DIV]] ; %conv = sext i8 %x to i32 - %div = sdiv i32 %conv, 128 + %div = sdiv nof i32 %conv, 128 ret i32 %div } @@ -25,18 +25,18 @@ ; CHECK-NEXT: ret i32 0 ; %conv = sext i8 %x to i32 - %div = sdiv i32 %conv, -129 + %div = sdiv nof i32 %conv, -129 ret i32 %div } define i32 @not_sdiv_sext_small_divisor(i8 %x) { ; CHECK-LABEL: @not_sdiv_sext_small_divisor( ; CHECK-NEXT: [[CONV:%.*]] = sext i8 %x to i32 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[CONV]], -128 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 [[CONV]], -128 ; CHECK-NEXT: ret i32 [[DIV]] ; %conv = sext i8 %x to i32 - %div = sdiv i32 %conv, -128 + %div = sdiv nof i32 %conv, -128 ret i32 %div } @@ -45,18 +45,18 @@ ; CHECK-NEXT: ret i32 0 ; %conv = zext i8 %x to i32 - %div = sdiv i32 %conv, 256 + %div = sdiv nof i32 %conv, 256 ret i32 %div } 
define i32 @not_sdiv_zext_big_divisor(i8 %x) { ; CHECK-LABEL: @not_sdiv_zext_big_divisor( ; CHECK-NEXT: [[CONV:%.*]] = zext i8 %x to i32 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[CONV]], 255 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 [[CONV]], 255 ; CHECK-NEXT: ret i32 [[DIV]] ; %conv = zext i8 %x to i32 - %div = sdiv i32 %conv, 255 + %div = sdiv nof i32 %conv, 255 ret i32 %div } @@ -65,18 +65,18 @@ ; CHECK-NEXT: ret i32 0 ; %conv = zext i8 %x to i32 - %div = sdiv i32 %conv, -256 + %div = sdiv nof i32 %conv, -256 ret i32 %div } define i32 @not_sdiv_zext_small_divisor(i8 %x) { ; CHECK-LABEL: @not_sdiv_zext_small_divisor( ; CHECK-NEXT: [[CONV:%.*]] = zext i8 %x to i32 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[CONV]], -255 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 [[CONV]], -255 ; CHECK-NEXT: ret i32 [[DIV]] ; %conv = zext i8 %x to i32 - %div = sdiv i32 %conv, -255 + %div = sdiv nof i32 %conv, -255 ret i32 %div } @@ -85,18 +85,18 @@ ; CHECK-NEXT: ret i32 0 ; %and = and i32 %x, 253 - %div = sdiv i32 %and, 254 + %div = sdiv nof i32 %and, 254 ret i32 %div } define i32 @not_sdiv_dividend_known_smaller_than_pos_divisor_clear_bits(i32 %x) { ; CHECK-LABEL: @not_sdiv_dividend_known_smaller_than_pos_divisor_clear_bits( ; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 253 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[AND]], 253 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 [[AND]], 253 ; CHECK-NEXT: ret i32 [[DIV]] ; %and = and i32 %x, 253 - %div = sdiv i32 %and, 253 + %div = sdiv nof i32 %and, 253 ret i32 %div } @@ -105,18 +105,18 @@ ; CHECK-NEXT: ret i32 0 ; %and = and i32 %x, 253 - %div = sdiv i32 %and, -254 + %div = sdiv nof i32 %and, -254 ret i32 %div } define i32 @not_sdiv_dividend_known_smaller_than_neg_divisor_clear_bits(i32 %x) { ; CHECK-LABEL: @not_sdiv_dividend_known_smaller_than_neg_divisor_clear_bits( ; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 253 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[AND]], -253 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 [[AND]], -253 ; CHECK-NEXT: ret i32 [[DIV]] ; %and = 
and i32 %x, 253 - %div = sdiv i32 %and, -253 + %div = sdiv nof i32 %and, -253 ret i32 %div } @@ -125,18 +125,18 @@ ; CHECK-NEXT: ret i32 0 ; %or = or i32 %x, -253 - %div = sdiv i32 %or, 254 + %div = sdiv nof i32 %or, 254 ret i32 %div } define i32 @not_sdiv_dividend_known_smaller_than_pos_divisor_set_bits(i32 %x) { ; CHECK-LABEL: @not_sdiv_dividend_known_smaller_than_pos_divisor_set_bits( ; CHECK-NEXT: [[OR:%.*]] = or i32 %x, -253 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[OR]], 253 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 [[OR]], 253 ; CHECK-NEXT: ret i32 [[DIV]] ; %or = or i32 %x, -253 - %div = sdiv i32 %or, 253 + %div = sdiv nof i32 %or, 253 ret i32 %div } @@ -145,18 +145,18 @@ ; CHECK-NEXT: ret i32 0 ; %or = or i32 %x, -253 - %div = sdiv i32 %or, -254 + %div = sdiv nof i32 %or, -254 ret i32 %div } define i32 @not_sdiv_dividend_known_smaller_than_neg_divisor_set_bits(i32 %x) { ; CHECK-LABEL: @not_sdiv_dividend_known_smaller_than_neg_divisor_set_bits( ; CHECK-NEXT: [[OR:%.*]] = or i32 %x, -253 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[OR]], -253 +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 [[OR]], -253 ; CHECK-NEXT: ret i32 [[DIV]] ; %or = or i32 %x, -253 - %div = sdiv i32 %or, -253 + %div = sdiv nof i32 %or, -253 ret i32 %div } @@ -333,11 +333,11 @@ define i16 @sdiv_min_dividend(i8 %x) { ; CHECK-LABEL: @sdiv_min_dividend( ; CHECK-NEXT: [[Z:%.*]] = zext i8 %x to i16 -; CHECK-NEXT: [[D:%.*]] = sdiv i16 -32768, [[Z]] +; CHECK-NEXT: [[D:%.*]] = sdiv nof i16 -32768, [[Z]] ; CHECK-NEXT: ret i16 [[D]] ; %z = zext i8 %x to i16 - %d = sdiv i16 -32768, %z + %d = sdiv nof i16 -32768, %z ret i16 %d } @@ -348,7 +348,7 @@ ; CHECK-NEXT: ret i16 0 ; %z = zext i8 %x to i16 - %d = sdiv i16 %z, -32768 + %d = sdiv nof i16 %z, -32768 ret i16 %d } Index: test/Transforms/InstSimplify/undef.ll =================================================================== --- test/Transforms/InstSimplify/undef.ll +++ test/Transforms/InstSimplify/undef.ll @@ -60,7 +60,7 @@ ; CHECK-LABEL: @test7( ; CHECK: 
ret i64 undef ; - %r = udiv i64 undef, 1 + %r = udiv nof i64 undef, 1 ret i64 %r } @@ -68,7 +68,7 @@ ; CHECK-LABEL: @test8( ; CHECK: ret i64 undef ; - %r = sdiv i64 undef, 1 + %r = sdiv nof i64 undef, 1 ret i64 %r } @@ -189,7 +189,7 @@ ; CHECK-LABEL: @test20( ; CHECK: ret i32 undef ; - %b = udiv i32 %a, 0 + %b = udiv nof i32 %a, 0 ret i32 %b } @@ -197,7 +197,7 @@ ; CHECK-LABEL: @test20vec( ; CHECK-NEXT: ret <2 x i32> undef ; - %b = udiv <2 x i32> %a, zeroinitializer + %b = udiv nof <2 x i32> %a, zeroinitializer ret <2 x i32> %b } @@ -205,7 +205,7 @@ ; CHECK-LABEL: @test21( ; CHECK: ret i32 undef ; - %b = sdiv i32 %a, 0 + %b = sdiv nof i32 %a, 0 ret i32 %b } @@ -213,7 +213,7 @@ ; CHECK-LABEL: @test21vec( ; CHECK-NEXT: ret <2 x i32> undef ; - %b = sdiv <2 x i32> %a, zeroinitializer + %b = sdiv nof <2 x i32> %a, zeroinitializer ret <2 x i32> %b } @@ -237,7 +237,7 @@ ; CHECK-LABEL: @test24( ; CHECK: ret i32 undef ; - %b = udiv i32 undef, 0 + %b = udiv nof i32 undef, 0 ret i32 %b } @@ -341,7 +341,7 @@ ; CHECK-LABEL: @test37( ; CHECK: ret i32 undef ; - %b = udiv i32 undef, undef + %b = udiv nof i32 undef, undef ret i32 %b } @@ -349,7 +349,7 @@ ; CHECK-LABEL: @test38( ; CHECK: ret i32 undef ; - %b = udiv i32 %a, undef + %b = udiv nof i32 %a, undef ret i32 %b } @@ -357,6 +357,6 @@ ; CHECK-LABEL: @test39( ; CHECK: ret i32 undef ; - %b = udiv i32 0, undef + %b = udiv nof i32 0, undef ret i32 %b } Index: test/Transforms/JumpThreading/pr9331.ll =================================================================== --- test/Transforms/JumpThreading/pr9331.ll +++ test/Transforms/JumpThreading/pr9331.ll @@ -25,7 +25,7 @@ br i1 %tobool.i, label %cond.false.i, label %safe_mod_func_uint64_t_u_u.exit cond.false.i: ; preds = %if.end21 - %div.i = udiv i64 %xor, %conv23 + %div.i = udiv nof i64 %xor, %conv23 br label %safe_mod_func_uint64_t_u_u.exit safe_mod_func_uint64_t_u_u.exit: ; preds = %cond.false.i, %if.end21 Index: test/Transforms/LICM/hoist-nounwind.ll 
=================================================================== --- test/Transforms/LICM/hoist-nounwind.ll +++ test/Transforms/LICM/hoist-nounwind.ll @@ -35,11 +35,11 @@ br i1 %cmp4, label %for.body, label %for.cond.cleanup ; CHECK: tail call void @f() -; CHECK-NEXT: sdiv i32 +; CHECK-NEXT: sdiv nof i32 for.body: %i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ] tail call void @f() nounwind - %div = sdiv i32 5, %c + %div = sdiv nof i32 5, %c %add = add i32 %i.05, 1 %inc = add i32 %add, %div %cmp = icmp slt i32 %inc, %N Index: test/Transforms/LICM/hoisting.ll =================================================================== --- test/Transforms/LICM/hoisting.ll +++ test/Transforms/LICM/hoisting.ll @@ -19,8 +19,8 @@ IfUnEqual: ; preds = %Loop ; CHECK: IfUnEqual: -; CHECK-NEXT: sdiv i32 4, %A - %B1 = sdiv i32 4, %A ; <i32> [#uses=1] +; CHECK-NEXT: sdiv nof i32 4, %A + %B1 = sdiv nof i32 4, %A ; <i32> [#uses=1] br label %LoopTail LoopTail: ; preds = %IfUnEqual, %Loop @@ -39,13 +39,13 @@ define i32 @test2(i1 %c) { ; CHECK-LABEL: @test2( ; CHECK-NEXT: load i32, i32* @X -; CHECK-NEXT: %B = sdiv i32 4, %A +; CHECK-NEXT: %B = sdiv nof i32 4, %A %A = load i32, i32* @X br label %Loop Loop: ;; Should have hoisted this div! 
- %B = sdiv i32 4, %A + %B = sdiv nof i32 4, %A br label %loop2 loop2: @@ -85,7 +85,7 @@ %i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ] %n.01 = phi i32 [ 0, %entry ], [ %add, %for.body ] call void @foo_may_call_exit(i32 0) - %div = sdiv i32 %x, %y + %div = sdiv nof i32 %x, %y %add = add nsw i32 %n.01, %div %inc = add nsw i32 %i.02, 1 %cmp = icmp slt i32 %inc, 10000 Index: test/Transforms/LICM/preheader-safe.ll =================================================================== --- test/Transforms/LICM/preheader-safe.ll +++ test/Transforms/LICM/preheader-safe.ll @@ -7,14 +7,14 @@ define void @nothrow(i64 %x, i64 %y, i1* %cond) { ; CHECK-LABEL: nothrow ; CHECK-LABEL: entry -; CHECK: %div = udiv i64 %x, %y +; CHECK: %div = udiv nof i64 %x, %y ; CHECK-LABEL: loop ; CHECK: call void @use_nothrow(i64 %div) entry: br label %loop loop: ; preds = %entry, %for.inc - %div = udiv i64 %x, %y + %div = udiv nof i64 %x, %y br label %loop2 loop2: @@ -25,13 +25,13 @@ define void @throw_header(i64 %x, i64 %y, i1* %cond) { ; CHECK-LABEL: throw_header ; CHECK-LABEL: loop -; CHECK: %div = udiv i64 %x, %y +; CHECK: %div = udiv nof i64 %x, %y ; CHECK: call void @use(i64 %div) entry: br label %loop loop: ; preds = %entry, %for.inc - %div = udiv i64 %x, %y + %div = udiv nof i64 %x, %y call void @use(i64 %div) br label %loop } @@ -41,13 +41,13 @@ define void @nothrow_header(i64 %x, i64 %y, i1 %cond) { ; CHECK-LABEL: nothrow_header ; CHECK-LABEL: entry -; CHECK: %div = udiv i64 %x, %y +; CHECK: %div = udiv nof i64 %x, %y ; CHECK-LABEL: loop ; CHECK: call void @use(i64 %div) entry: br label %loop loop: ; preds = %entry, %for.inc - %div = udiv i64 %x, %y + %div = udiv nof i64 %x, %y br i1 %cond, label %loop-if, label %exit loop-if: call void @use(i64 %div) @@ -60,14 +60,14 @@ ; CHECK-LABEL: nothrow_header_neg ; CHECK-LABEL: entry ; CHECK-LABEL: loop -; CHECK: %div = udiv i64 %x, %y +; CHECK: %div = udiv nof i64 %x, %y ; CHECK: call void @use(i64 %div) entry: br label %loop loop: ; preds = 
%entry, %for.inc br label %loop-if loop-if: - %div = udiv i64 %x, %y + %div = udiv nof i64 %x, %y call void @use(i64 %div) br label %loop } Index: test/Transforms/LICM/sinking.ll =================================================================== --- test/Transforms/LICM/sinking.ll +++ test/Transforms/LICM/sinking.ll @@ -224,7 +224,7 @@ br label %Loop Loop: ; preds = %Loop, %Entry %N_addr.0.pn = phi i32 [ %dec, %Loop ], [ %N, %Entry ] ; <i32> [#uses=3] - %tmp.6 = sdiv i32 %N, %N_addr.0.pn ; <i32> [#uses=1] + %tmp.6 = sdiv nof i32 %N, %N_addr.0.pn ; <i32> [#uses=1] %dec = add i32 %N_addr.0.pn, -1 ; <i32> [#uses=1] %tmp.1 = icmp ne i32 %N_addr.0.pn, 0 ; <i1> [#uses=1] br i1 %tmp.1, label %Loop, label %Out @@ -234,7 +234,7 @@ ; CHECK-LABEL: @test10( ; CHECK: Out: ; CHECK-NEXT: %[[LCSSAPHI:.*]] = phi i32 [ %N_addr.0.pn -; CHECK-NEXT: %tmp.6.le = sdiv i32 %N, %[[LCSSAPHI]] +; CHECK-NEXT: %tmp.6.le = sdiv nof i32 %N, %[[LCSSAPHI]] ; CHECK-NEXT: ret i32 %tmp.6.le } Index: test/Transforms/LICM/speculate.ll =================================================================== --- test/Transforms/LICM/speculate.ll +++ test/Transforms/LICM/speculate.ll @@ -4,7 +4,7 @@ ; UDiv is safe to speculate if the denominator is known non-zero. 
; CHECK-LABEL: @safe_udiv( -; CHECK: %div = udiv i64 %x, 2 +; CHECK: %div = udiv nof i64 %x, 2 ; CHECK-NEXT: br label %for.body define void @safe_udiv(i64 %x, i64 %m, i64 %n, i32* %p, i64* %q) nounwind { @@ -19,7 +19,7 @@ br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %div = udiv i64 %x, 2 + %div = udiv nof i64 %x, 2 %arrayidx1 = getelementptr inbounds i64, i64* %q, i64 %i.02 store i64 %div, i64* %arrayidx1, align 8 br label %for.inc @@ -51,7 +51,7 @@ br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %div = udiv i64 %x, %m + %div = udiv nof i64 %x, %m %arrayidx1 = getelementptr inbounds i64, i64* %q, i64 %i.02 store i64 %div, i64* %arrayidx1, align 8 br label %for.inc @@ -69,7 +69,7 @@ ; known to have at least one zero bit. ; CHECK-LABEL: @safe_sdiv( -; CHECK: %div = sdiv i64 %x, 2 +; CHECK: %div = sdiv nof i64 %x, 2 ; CHECK-NEXT: br label %for.body define void @safe_sdiv(i64 %x, i64 %m, i64 %n, i32* %p, i64* %q) nounwind { @@ -85,7 +85,7 @@ br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %div = sdiv i64 %x, 2 + %div = sdiv nof i64 %x, 2 %arrayidx1 = getelementptr inbounds i64, i64* %q, i64 %i.02 store i64 %div, i64* %arrayidx1, align 8 br label %for.inc @@ -118,7 +118,7 @@ br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %div = sdiv i64 %x, %or + %div = sdiv nof i64 %x, %or %arrayidx1 = getelementptr inbounds i64, i64* %q, i64 %i.02 store i64 %div, i64* %arrayidx1, align 8 br label %for.inc @@ -151,7 +151,7 @@ br i1 %tobool, label %for.inc, label %if.then if.then: ; preds = %for.body - %div = sdiv i64 %x, %and + %div = sdiv nof i64 %x, %and %arrayidx1 = getelementptr inbounds i64, i64* %q, i64 %i.02 store i64 %div, i64* %arrayidx1, align 8 br label %for.inc @@ -179,7 +179,7 @@ br i1 %c, label %backedge, label %if.then if.then: - %d = sdiv i64 %a, %b + %d = sdiv nof i64 %a, %b store i64 %d, i64* %p br label %backedge Index: 
test/Transforms/LoopDeletion/multiple-exits.ll =================================================================== --- test/Transforms/LoopDeletion/multiple-exits.ll +++ test/Transforms/LoopDeletion/multiple-exits.ll @@ -66,7 +66,7 @@ %t2 = icmp slt i64 %x.0, %m ; This unused division prevents unifying this loop exit path with others ; because it can be deleted but cannot be hoisted. - %unused1 = udiv i64 42, %maybe_zero + %unused1 = udiv nof i64 42, %maybe_zero br i1 %t2, label %bb3, label %return ; BEFORE: bb2: ; BEFORE: br i1 {{.*}}, label %bb3, label %return @@ -77,7 +77,7 @@ %t3 = icmp slt i64 %x.0, %m ; This unused division prevents unifying this loop exit path with others ; because it can be deleted but cannot be hoisted. - %unused2 = sdiv i64 42, %maybe_zero + %unused2 = sdiv nof i64 42, %maybe_zero br i1 %t3, label %bb, label %return ; BEFORE: bb3: ; BEFORE: br i1 {{.*}}, label %bb, label %return @@ -115,7 +115,7 @@ %t2 = icmp slt i64 %x.0, %m ; This unused division prevents unifying this loop exit path with others ; because it can be deleted but cannot be hoisted. - %unused1 = udiv i64 42, %maybe_zero + %unused1 = udiv nof i64 42, %maybe_zero br i1 %t2, label %bb3, label %return ; CHECK: bb2: ; CHECK: br i1 {{.*}}, label %bb3, label %return @@ -124,7 +124,7 @@ %t3 = icmp slt i64 %x.0, %m ; This unused division prevents unifying this loop exit path with others ; because it can be deleted but cannot be hoisted. 
- %unused2 = sdiv i64 42, %maybe_zero + %unused2 = sdiv nof i64 42, %maybe_zero br i1 %t3, label %bb, label %return ; CHECK: bb3: ; CHECK: br i1 {{.*}}, label %bb, label %return Index: test/Transforms/LoopDeletion/unreachable-loops.ll =================================================================== --- test/Transforms/LoopDeletion/unreachable-loops.ll +++ test/Transforms/LoopDeletion/unreachable-loops.ll @@ -74,12 +74,12 @@ bb2: %t2 = icmp slt i64 %x.0, %m - %unused1 = udiv i64 42, %maybe_zero + %unused1 = udiv nof i64 42, %maybe_zero br i1 %t2, label %bb3, label %return bb3: %t3 = icmp slt i64 %x.0, %m - %unused2 = sdiv i64 42, %maybe_zero + %unused2 = sdiv nof i64 42, %maybe_zero br i1 %t3, label %bb, label %return return: Index: test/Transforms/LoopIdiom/unsafe.ll =================================================================== --- test/Transforms/LoopIdiom/unsafe.ll +++ test/Transforms/LoopIdiom/unsafe.ll @@ -1,7 +1,7 @@ ; RUN: opt -S < %s -loop-idiom | FileCheck %s ; CHECK-NOT: memset ; check that memset is not generated (for stores) because that will result -; in udiv hoisted out of the loop by the SCEV Expander +; in udiv nof hoisted out of the loop by the SCEV Expander ; TODO: ideally we should be able to generate memset ; if SCEV expander is taught to generate the dependencies ; at the right point. 
@@ -17,7 +17,7 @@ br label %for.cond1thread-pre-split for.cond1thread-pre-split: ; preds = %for.body5, %entry - %div = udiv i32 %d0, %d1 + %div = udiv nof i32 %d0, %d1 br label %for.body5 for.body5: ; preds = %for.body5, %for.cond1thread-pre-split @@ -40,7 +40,7 @@ br label %for.body for.body: ; preds = %for.body6, %entry - %div = udiv i32 %a, %b + %div = udiv nof i32 %a, %b %conv = zext i32 %div to i64 br label %for.body6 Index: test/Transforms/LoopPredication/basic.ll =================================================================== --- test/Transforms/LoopPredication/basic.ll +++ test/Transforms/LoopPredication/basic.ll @@ -1004,12 +1004,12 @@ ; CHECK: loop: ; CHECK-NEXT: %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ] ; CHECK-NEXT: %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ] -; CHECK-NEXT: %length.udiv = udiv i32 %length, %divider +; CHECK-NEXT: %length.udiv = udiv nof i32 %length, %divider ; CHECK-NEXT: %within.bounds = icmp ult i32 %i, %length.udiv ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ] %loop.acc = phi i32 [ %loop.acc.next, %loop ], [ 0, %loop.preheader ] %i = phi i32 [ %i.next, %loop ], [ 0, %loop.preheader ] - %length.udiv = udiv i32 %length, %divider + %length.udiv = udiv nof i32 %length, %divider %within.bounds = icmp ult i32 %i, %length.udiv call void (i1, ...) 
@llvm.experimental.guard(i1 %within.bounds, i32 9) [ "deopt"() ] Index: test/Transforms/LoopPredication/nested.ll =================================================================== --- test/Transforms/LoopPredication/nested.ll +++ test/Transforms/LoopPredication/nested.ll @@ -178,7 +178,7 @@ %outer.loop.acc = phi i32 [ %outer.loop.acc.next, %outer.loop.inc ], [ 0, %outer.loop.preheader ] %i = phi i32 [ %i.next, %outer.loop.inc ], [ 0, %outer.loop.preheader ] %tmp6 = icmp sle i32 %l, 0 - %div = udiv i32 %i, %maybezero + %div = udiv nof i32 %i, %maybezero br i1 %tmp6, label %outer.loop.inc, label %inner.loop.preheader inner.loop.preheader: Index: test/Transforms/LoopStrengthReduce/2012-01-02-nopreheader.ll =================================================================== --- test/Transforms/LoopStrengthReduce/2012-01-02-nopreheader.ll +++ test/Transforms/LoopStrengthReduce/2012-01-02-nopreheader.ll @@ -18,7 +18,7 @@ ; CHECK-NOT: phi float* define void @nopreheader(float* nocapture %a, i32 %n) nounwind { entry: - %0 = sdiv i32 %n, undef + %0 = sdiv nof i32 %n, undef indirectbr i8* undef, [label %bb10.preheader] bb10.preheader: ; preds = %bb4 Index: test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll =================================================================== --- test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll +++ test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll @@ -52,7 +52,7 @@ br label %for.cond.i.i.us for.body.i.i.us: ; preds = %codeRepl5.us - %div.i.i.i.us = udiv i32 1, %conv.i.i + %div.i.i.i.us = udiv nof i32 1, %conv.i.i %cmp5.i.i.us = icmp eq i32 %div.i.i.i.us, %tmp2 br i1 %cmp5.i.i.us, label %codeRepl.loopexit.us-lcssa.us, label %for.inc.i.i.us Index: test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll =================================================================== --- test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll +++ test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll @@ -36,7 +36,7 @@ %tmp51 = 
sub i64 32, %0 %incdec.ptr = getelementptr [33 x i16], [33 x i16]* %buffer, i64 0, i64 %tmp51 %rem = urem i32 %i.addr.0, 10 - %div = udiv i32 %i.addr.0, 10 + %div = udiv nof i32 %i.addr.0, 10 %idxprom = zext i32 %rem to i64 %arrayidx = getelementptr inbounds [37 x i8], [37 x i8]* @.str, i64 0, i64 %idxprom %tmp5 = load i8, i8* %arrayidx, align 1 Index: test/Transforms/LoopStrengthReduce/pr2570.ll =================================================================== --- test/Transforms/LoopStrengthReduce/pr2570.ll +++ test/Transforms/LoopStrengthReduce/pr2570.ll @@ -98,7 +98,7 @@ zext i1 %42 to i16 ; <i16>:43 [#uses=1] tail call i32 @func_74( i16 zeroext 23618, i8 zeroext -29, i16 zeroext %43, i16 zeroext 1 ) nounwind ; <i32>:44 [#uses=2] tail call i32 @func_103( i16 zeroext -869 ) nounwind ; <i32>:45 [#uses=0] - udiv i32 %44, 34162 ; <i32>:46 [#uses=1] + udiv nof i32 %44, 34162 ; <i32>:46 [#uses=1] icmp ult i32 %44, 34162 ; <i1>:47 [#uses=1] %.0331 = select i1 %47, i32 1, i32 %46 ; <i32> [#uses=1] urem i32 293685862, %.0331 ; <i32>:48 [#uses=1] @@ -110,7 +110,7 @@ zext i16 %p_48 to i32 ; <i32>:51 [#uses=1] icmp eq i16 %p_48, 0 ; <i1>:52 [#uses=1] %.0329 = select i1 %52, i32 1, i32 %51 ; <i32> [#uses=1] - udiv i32 -1, %.0329 ; <i32>:53 [#uses=1] + udiv nof i32 -1, %.0329 ; <i32>:53 [#uses=1] icmp eq i32 %53, 0 ; <i1>:54 [#uses=1] br i1 %54, label %bb222, label %bb223 @@ -162,7 +162,7 @@ bb241: ; preds = %bb223 store i16 -9, i16* @g_221, align 2 - udiv i32 %p_52, 1538244727 ; <i32>:90 [#uses=1] + udiv nof i32 %p_52, 1538244727 ; <i32>:90 [#uses=1] load i32, i32* @g_207, align 4 ; <i32>:91 [#uses=1] sub i32 %91, %90 ; <i32>:92 [#uses=1] load i32, i32* @g_14, align 4 ; <i32>:93 [#uses=1] @@ -229,7 +229,7 @@ bb279: ; preds = %bb274.split icmp eq i32 %120, 0 ; <i1>:122 [#uses=1] %.0317 = select i1 %122, i32 1, i32 %120 ; <i32> [#uses=1] - udiv i32 -8, %.0317 ; <i32>:123 [#uses=1] + udiv nof i32 -8, %.0317 ; <i32>:123 [#uses=1] trunc i32 %123 to i16 ; <i16>:124 [#uses=1] br 
label %bb284 Index: test/Transforms/LoopUnroll/high-cost-trip-count-computation.ll =================================================================== --- test/Transforms/LoopUnroll/high-cost-trip-count-computation.ll +++ test/Transforms/LoopUnroll/high-cost-trip-count-computation.ll @@ -37,10 +37,10 @@ ; CHECK-LABEL: for.body entry: %rem0 = load i64, i64* %loc, align 8 - %ExpensiveComputation = udiv i64 %rem0, 42 ; <<< Extra computations are added to the trip-count expression + %ExpensiveComputation = udiv nof i64 %rem0, 42 ; <<< Extra computations are added to the trip-count expression br label %bb1 bb1: - %div11 = udiv i64 %ExpensiveComputation, %conv7 + %div11 = udiv nof i64 %ExpensiveComputation, %conv7 %cmp.i38 = icmp ugt i64 %div11, 1 %div12 = select i1 %cmp.i38, i64 %div11, i64 1 br label %for.body Index: test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll =================================================================== --- test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll +++ test/Transforms/LoopVectorize/AArch64/aarch64-predication.ll @@ -12,7 +12,7 @@ ; %tmp4 a lower scalarization overhead. 
; ; COST-LABEL: predicated_udiv_scalarized_operand -; COST: LV: Found an estimated cost of 4 for VF 2 For instruction: %tmp4 = udiv i64 %tmp2, %tmp3 +; COST: LV: Found an estimated cost of 4 for VF 2 For instruction: %tmp4 = udiv nof i64 %tmp2, %tmp3 ; ; CHECK-LABEL: @predicated_udiv_scalarized_operand( ; CHECK: vector.body: @@ -28,7 +28,7 @@ ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0 ; CHECK-NEXT: [[TMP5:%.*]] = add nsw i64 [[TMP4]], %x ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 0 -; CHECK-NEXT: [[TMP7:%.*]] = udiv i64 [[TMP6]], [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = udiv nof i64 [[TMP6]], [[TMP5]] ; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> undef, i64 [[TMP7]], i32 0 ; CHECK-NEXT: br label %[[PRED_UDIV_CONTINUE]] ; CHECK: [[PRED_UDIV_CONTINUE]]: @@ -39,7 +39,7 @@ ; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1 ; CHECK-NEXT: [[TMP12:%.*]] = add nsw i64 [[TMP11]], %x ; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i64> [[WIDE_LOAD]], i32 1 -; CHECK-NEXT: [[TMP14:%.*]] = udiv i64 [[TMP13]], [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = udiv nof i64 [[TMP13]], [[TMP12]] ; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x i64> [[TMP9]], i64 [[TMP14]], i32 1 ; CHECK-NEXT: br label %[[PRED_UDIV_CONTINUE2]] ; CHECK: [[PRED_UDIV_CONTINUE2]]: @@ -63,7 +63,7 @@ if.then: %tmp3 = add nsw i64 %tmp2, %x - %tmp4 = udiv i64 %tmp2, %tmp3 + %tmp4 = udiv nof i64 %tmp2, %tmp3 br label %for.inc for.inc: Index: test/Transforms/LoopVectorize/AArch64/predication_costs.ll =================================================================== --- test/Transforms/LoopVectorize/AArch64/predication_costs.ll +++ test/Transforms/LoopVectorize/AArch64/predication_costs.ll @@ -18,8 +18,8 @@ ; Cost of udiv: ; (udiv(2) + extractelement(6) + insertelement(3)) / 2 = 5 ; -; CHECK: Scalarizing and predicating: %tmp4 = udiv i32 %tmp2, %tmp3 -; CHECK: Found an estimated cost of 5 for VF 2 For instruction: %tmp4 = 
udiv i32 %tmp2, %tmp3 +; CHECK: Scalarizing and predicating: %tmp4 = udiv nof i32 %tmp2, %tmp3 +; CHECK: Found an estimated cost of 5 for VF 2 For instruction: %tmp4 = udiv nof i32 %tmp2, %tmp3 ; define i32 @predicated_udiv(i32* %a, i32* %b, i1 %c, i64 %n) { entry: @@ -35,7 +35,7 @@ br i1 %c, label %if.then, label %for.inc if.then: - %tmp4 = udiv i32 %tmp2, %tmp3 + %tmp4 = udiv nof i32 %tmp2, %tmp3 br label %for.inc for.inc: @@ -99,9 +99,9 @@ ; (udiv(2) + extractelement(3) + insertelement(3)) / 2 = 4 ; ; CHECK: Scalarizing: %tmp3 = add nsw i32 %tmp2, %x -; CHECK: Scalarizing and predicating: %tmp4 = udiv i32 %tmp2, %tmp3 +; CHECK: Scalarizing and predicating: %tmp4 = udiv nof i32 %tmp2, %tmp3 ; CHECK: Found an estimated cost of 2 for VF 2 For instruction: %tmp3 = add nsw i32 %tmp2, %x -; CHECK: Found an estimated cost of 4 for VF 2 For instruction: %tmp4 = udiv i32 %tmp2, %tmp3 +; CHECK: Found an estimated cost of 4 for VF 2 For instruction: %tmp4 = udiv nof i32 %tmp2, %tmp3 ; define i32 @predicated_udiv_scalarized_operand(i32* %a, i1 %c, i32 %x, i64 %n) { entry: @@ -116,7 +116,7 @@ if.then: %tmp3 = add nsw i32 %tmp2, %x - %tmp4 = udiv i32 %tmp2, %tmp3 + %tmp4 = udiv nof i32 %tmp2, %tmp3 br label %for.inc for.inc: @@ -177,7 +177,7 @@ ; This test checks that we correctly compute the cost of multiple predicated ; instructions in the same block. The sdiv, udiv, and store must be scalarized ; and predicated. The sub feeding the store is scalarized and sunk inside the -; store's predicated block. However, the add feeding the sdiv and udiv cannot +; store's predicated block. However, the add feeding the sdiv nof and udiv nof cannot ; be sunk and is not scalarized. 
If we assume the block probability is 50%, we ; compute the cost as: ; @@ -193,13 +193,13 @@ ; store(4) / 2 = 2 ; ; CHECK-NOT: Scalarizing: %tmp2 = add i32 %tmp1, %x -; CHECK: Scalarizing and predicating: %tmp3 = sdiv i32 %tmp1, %tmp2 -; CHECK: Scalarizing and predicating: %tmp4 = udiv i32 %tmp3, %tmp2 +; CHECK: Scalarizing and predicating: %tmp3 = sdiv nof i32 %tmp1, %tmp2 +; CHECK: Scalarizing and predicating: %tmp4 = udiv nof i32 %tmp3, %tmp2 ; CHECK: Scalarizing: %tmp5 = sub i32 %tmp4, %x ; CHECK: Scalarizing and predicating: store i32 %tmp5, i32* %tmp0, align 4 ; CHECK: Found an estimated cost of 1 for VF 2 For instruction: %tmp2 = add i32 %tmp1, %x -; CHECK: Found an estimated cost of 5 for VF 2 For instruction: %tmp3 = sdiv i32 %tmp1, %tmp2 -; CHECK: Found an estimated cost of 5 for VF 2 For instruction: %tmp4 = udiv i32 %tmp3, %tmp2 +; CHECK: Found an estimated cost of 5 for VF 2 For instruction: %tmp3 = sdiv nof i32 %tmp1, %tmp2 +; CHECK: Found an estimated cost of 5 for VF 2 For instruction: %tmp4 = udiv nof i32 %tmp3, %tmp2 ; CHECK: Found an estimated cost of 2 for VF 2 For instruction: %tmp5 = sub i32 %tmp4, %x ; CHECK: Found an estimated cost of 2 for VF 2 For instruction: store i32 %tmp5, i32* %tmp0, align 4 ; @@ -215,8 +215,8 @@ if.then: %tmp2 = add i32 %tmp1, %x - %tmp3 = sdiv i32 %tmp1, %tmp2 - %tmp4 = udiv i32 %tmp3, %tmp2 + %tmp3 = sdiv nof i32 %tmp1, %tmp2 + %tmp4 = udiv nof i32 %tmp3, %tmp2 %tmp5 = sub i32 %tmp4, %x store i32 %tmp5, i32* %tmp0, align 4 br label %for.inc Index: test/Transforms/LoopVectorize/AArch64/sdiv-pow2.ll =================================================================== --- test/Transforms/LoopVectorize/AArch64/sdiv-pow2.ll +++ test/Transforms/LoopVectorize/AArch64/sdiv-pow2.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: @foo( ; CHECK: load <4 x i32>, <4 x i32>* -; CHECK: sdiv <4 x i32> +; CHECK: sdiv nof <4 x i32> ; CHECK: store <4 x i32> define void @foo(){ @@ -19,7 +19,7 @@ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, 
%for.body ] %arrayidx = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 2, i64 %indvars.iv %0 = load i32, i32* %arrayidx, align 4 - %div = sdiv i32 %0, 2 + %div = sdiv nof i32 %0, 2 %arrayidx2 = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 0, i64 %indvars.iv store i32 %div, i32* %arrayidx2, align 4 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 Index: test/Transforms/LoopVectorize/X86/masked_load_store.ll =================================================================== --- test/Transforms/LoopVectorize/X86/masked_load_store.ll +++ test/Transforms/LoopVectorize/X86/masked_load_store.ll @@ -476,7 +476,7 @@ %idxprom6 = sext i32 %10 to i64 %11 = load i32*, i32** %A.addr, align 8 %arrayidx7 = getelementptr inbounds i32, i32* %11, i64 %idxprom6 - store i32 sdiv (i32 1, i32 zext (i1 icmp eq (i32** getelementptr inbounds ([1 x i32*], [1 x i32*]* @a, i64 0, i64 1), i32** @c) to i32)), i32* %arrayidx7, align 4 + store i32 sdiv nof (i32 1, i32 zext (i1 icmp eq (i32** getelementptr inbounds ([1 x i32*], [1 x i32*]* @a, i64 0, i64 1), i32** @c) to i32)), i32* %arrayidx7, align 4 br label %if.end if.end: ; preds = %if.then, %for.body Index: test/Transforms/LoopVectorize/X86/powof2div.ll =================================================================== --- test/Transforms/LoopVectorize/X86/powof2div.ll +++ test/Transforms/LoopVectorize/X86/powof2div.ll @@ -8,7 +8,7 @@ ; CHECK-LABEL: @foo( ; CHECK: load <4 x i32>, <4 x i32>* -; CHECK: sdiv <4 x i32> +; CHECK: sdiv nof <4 x i32> ; CHECK: store <4 x i32> define void @foo(){ @@ -19,7 +19,7 @@ %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] %arrayidx = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 2, i64 %indvars.iv %0 = load i32, i32* %arrayidx, align 4 - %div = sdiv i32 %0, 2 + %div = sdiv nof i32 %0, 2 %arrayidx2 = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 0, i64 %indvars.iv store i32 %div, i32* %arrayidx2, align 
4 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 Index: test/Transforms/LoopVectorize/X86/x86-predication.ll =================================================================== --- test/Transforms/LoopVectorize/X86/x86-predication.ll +++ test/Transforms/LoopVectorize/X86/x86-predication.ll @@ -15,7 +15,7 @@ ; CHECK: br i1 {{.*}}, label %[[IF0:.+]], label %[[CONT0:.+]] ; CHECK: [[IF0]]: ; CHECK: %[[T0:.+]] = extractelement <2 x i32> %wide.masked.load, i32 0 -; CHECK: %[[T1:.+]] = sdiv i32 %[[T0]], %x +; CHECK: %[[T1:.+]] = sdiv nof i32 %[[T0]], %x ; CHECK: %[[T2:.+]] = insertelement <2 x i32> undef, i32 %[[T1]], i32 0 ; CHECK: br label %[[CONT0]] ; CHECK: [[CONT0]]: @@ -23,7 +23,7 @@ ; CHECK: br i1 {{.*}}, label %[[IF1:.+]], label %[[CONT1:.+]] ; CHECK: [[IF1]]: ; CHECK: %[[T4:.+]] = extractelement <2 x i32> %wide.masked.load, i32 1 -; CHECK: %[[T5:.+]] = sdiv i32 %[[T4]], %x +; CHECK: %[[T5:.+]] = sdiv nof i32 %[[T4]], %x ; CHECK: %[[T6:.+]] = insertelement <2 x i32> %[[T3]], i32 %[[T5]], i32 1 ; CHECK: br label %[[CONT1]] ; CHECK: [[CONT1]]: @@ -44,7 +44,7 @@ if.then: %tmp2 = getelementptr inbounds i32, i32* %b, i64 %i %tmp3 = load i32, i32* %tmp2, align 4 - %tmp4 = sdiv i32 %tmp3, %x + %tmp4 = sdiv nof i32 %tmp3, %x %tmp5 = add nsw i32 %tmp4, %tmp1 br label %for.inc @@ -67,7 +67,7 @@ ; SINK-GATHER: vector.body: ; SINK-GATHER: pred.udiv.if: ; SINK-GATHER: %[[T0:.+]] = load i32, i32* %{{.*}}, align 4 -; SINK-GATHER: %{{.*}} = udiv i32 %[[T0]], %{{.*}} +; SINK-GATHER: %{{.*}} = udiv nof i32 %[[T0]], %{{.*}} ; SINK-GATHER: pred.udiv.continue: define i32 @scalarize_and_sink_gather(i32* %a, i1 %c, i32 %x, i64 %n) { entry: @@ -82,7 +82,7 @@ if.then: %tmp0 = getelementptr inbounds i32, i32* %a, i64 %i7 %tmp2 = load i32, i32* %tmp0, align 4 - %tmp4 = udiv i32 %tmp2, %x + %tmp4 = udiv nof i32 %tmp2, %x br label %for.inc for.inc: Index: test/Transforms/LoopVectorize/if-conversion.ll =================================================================== --- 
test/Transforms/LoopVectorize/if-conversion.ll +++ test/Transforms/LoopVectorize/if-conversion.ll @@ -128,7 +128,7 @@ br label %cond.end cond.end: - %cond = phi i32 [ sdiv (i32 1, i32 zext (i1 icmp eq (i32** getelementptr inbounds ([1 x i32*], [1 x i32*]* @a, i64 0, i64 0), i32** @c) to i32)), %cond.false ], [ 0, %for.body ] + %cond = phi i32 [ sdiv nof (i32 1, i32 zext (i1 icmp eq (i32** getelementptr inbounds ([1 x i32*], [1 x i32*]* @a, i64 0, i64 0), i32** @c) to i32)), %cond.false ], [ 0, %for.body ] %or = or i32 %or2, %cond %inc = add nsw i32 %inc3, 1 %cmp = icmp slt i32 %inc, 128 @@ -155,7 +155,7 @@ br i1 icmp eq (i32** getelementptr inbounds ([1 x i32*], [1 x i32*]* @a, i64 0, i64 0), i32** @c), label %cond.false, label %cond.end cond.false: - %cond.1 = or i32 %inc3, sdiv (i32 1, i32 zext (i1 icmp eq (i32** getelementptr inbounds ([1 x i32*], [1 x i32*]* @a, i64 0, i64 1), i32** @c) to i32)) + %cond.1 = or i32 %inc3, sdiv nof (i32 1, i32 zext (i1 icmp eq (i32** getelementptr inbounds ([1 x i32*], [1 x i32*]* @a, i64 0, i64 1), i32** @c) to i32)) br label %cond.end cond.end: Index: test/Transforms/LoopVectorize/if-pred-non-void.ll =================================================================== --- test/Transforms/LoopVectorize/if-pred-non-void.ll +++ test/Transforms/LoopVectorize/if-pred-non-void.ll @@ -23,7 +23,7 @@ ; CHECK: [[CSD]]: ; CHECK: %[[SDA0:[a-zA-Z0-9]+]] = extractelement <2 x i32> %{{.*}}, i32 0 ; CHECK: %[[SDA1:[a-zA-Z0-9]+]] = extractelement <2 x i32> %{{.*}}, i32 0 -; CHECK: %[[SD0:[a-zA-Z0-9]+]] = sdiv i32 %[[SDA0]], %[[SDA1]] +; CHECK: %[[SD0:[a-zA-Z0-9]+]] = sdiv nof i32 %[[SDA0]], %[[SDA1]] ; CHECK: %[[SD1:[a-zA-Z0-9]+]] = insertelement <2 x i32> undef, i32 %[[SD0]], i32 0 ; CHECK: br label %[[ESD]] ; CHECK: [[ESD]]: @@ -33,7 +33,7 @@ ; CHECK: [[CSDH]]: ; CHECK: %[[SDA0H:[a-zA-Z0-9]+]] = extractelement <2 x i32> %{{.*}}, i32 1 ; CHECK: %[[SDA1H:[a-zA-Z0-9]+]] = extractelement <2 x i32> %{{.*}}, i32 1 -; CHECK: %[[SD0H:[a-zA-Z0-9]+]] = 
sdiv i32 %[[SDA0H]], %[[SDA1H]] +; CHECK: %[[SD0H:[a-zA-Z0-9]+]] = sdiv nof i32 %[[SDA0H]], %[[SDA1H]] ; CHECK: %[[SD1H:[a-zA-Z0-9]+]] = insertelement <2 x i32> %[[SDR]], i32 %[[SD0H]], i32 1 ; CHECK: br label %[[ESDH]] ; CHECK: [[ESDH]]: @@ -44,7 +44,7 @@ ; CHECK: [[CUD]]: ; CHECK: %[[UDA0:[a-zA-Z0-9]+]] = extractelement <2 x i32> %{{.*}}, i32 0 ; CHECK: %[[UDA1:[a-zA-Z0-9]+]] = extractelement <2 x i32> %{{.*}}, i32 0 -; CHECK: %[[UD0:[a-zA-Z0-9]+]] = udiv i32 %[[UDA0]], %[[UDA1]] +; CHECK: %[[UD0:[a-zA-Z0-9]+]] = udiv nof i32 %[[UDA0]], %[[UDA1]] ; CHECK: %[[UD1:[a-zA-Z0-9]+]] = insertelement <2 x i32> undef, i32 %[[UD0]], i32 0 ; CHECK: br label %[[EUD]] ; CHECK: [[EUD]]: @@ -90,8 +90,8 @@ br i1 %cmp1, label %if.then, label %if.end if.then: ; preds = %for.body - %rsd = sdiv i32 %psd, %lsd - %rud = udiv i32 %pud, %lud + %rsd = sdiv nof i32 %psd, %lsd + %rud = udiv nof i32 %pud, %lud %rsr = srem i32 %psr, %lsr %rur = urem i32 %pur, %lur br label %if.end @@ -121,7 +121,7 @@ ; CHECK: vector.body: ; CHECK: br i1 %{{.*}}, label %[[THEN:[a-zA-Z0-9.]+]], label %[[FI:[a-zA-Z0-9.]+]] ; CHECK: [[THEN]]: -; CHECK: %[[PD:[a-zA-Z0-9]+]] = sdiv i32 %{{.*}}, %{{.*}} +; CHECK: %[[PD:[a-zA-Z0-9]+]] = sdiv nof i32 %{{.*}}, %{{.*}} ; CHECK: br label %[[FI]] ; CHECK: [[FI]]: ; CHECK: %{{.*}} = phi i32 [ undef, %vector.body ], [ %[[PD]], %[[THEN]] ] @@ -137,8 +137,8 @@ br i1 %cmp1, label %if.then, label %if.end if.then: ; preds = %for.body - %sd1 = sdiv i32 %psd, %lsd - %rsd = sdiv i32 %lsd.b, %sd1 + %sd1 = sdiv nof i32 %psd, %lsd + %rsd = sdiv nof i32 %lsd.b, %sd1 br label %if.end if.end: ; preds = %if.then, %for.body @@ -166,7 +166,7 @@ ; CHECK: %[[EXTRACT:.+]] = extractelement <2 x i1> %[[OR]], i32 0 ; CHECK: br i1 %[[EXTRACT]], label %[[THEN:[a-zA-Z0-9.]+]], label %[[FI:[a-zA-Z0-9.]+]] ; CHECK: [[THEN]]: -; CHECK: %[[PD:[a-zA-Z0-9]+]] = sdiv i32 %{{.*}}, %{{.*}} +; CHECK: %[[PD:[a-zA-Z0-9]+]] = sdiv nof i32 %{{.*}}, %{{.*}} ; CHECK: br label %[[FI]] ; CHECK: [[FI]]: ; CHECK: 
%{{.*}} = phi i32 [ undef, %vector.body ], [ %[[PD]], %[[THEN]] ] @@ -187,8 +187,8 @@ br i1 %cmp2, label %if.then, label %if.end if.then: ; preds = %check, %for.body - %sd1 = sdiv i32 %psd, %lsd - %rsd = sdiv i32 %lsd.b, %sd1 + %sd1 = sdiv nof i32 %psd, %lsd + %rsd = sdiv nof i32 %lsd.b, %sd1 br label %if.end if.end: ; preds = %if.then, %check @@ -212,7 +212,7 @@ ; CHECK: %[[T00:.+]] = extractelement <2 x i32> %wide.load, i32 0 ; CHECK: %[[T01:.+]] = add nsw i32 %[[T00]], %x ; CHECK: %[[T02:.+]] = extractelement <2 x i32> %wide.load, i32 0 -; CHECK: %[[T03:.+]] = udiv i32 %[[T02]], %[[T01]] +; CHECK: %[[T03:.+]] = udiv nof i32 %[[T02]], %[[T01]] ; CHECK: %[[T04:.+]] = insertelement <2 x i32> undef, i32 %[[T03]], i32 0 ; CHECK: br label %[[CONT0]] ; CHECK: [[CONT0]]: @@ -222,7 +222,7 @@ ; CHECK: %[[T06:.+]] = extractelement <2 x i32> %wide.load, i32 1 ; CHECK: %[[T07:.+]] = add nsw i32 %[[T06]], %x ; CHECK: %[[T08:.+]] = extractelement <2 x i32> %wide.load, i32 1 -; CHECK: %[[T09:.+]] = udiv i32 %[[T08]], %[[T07]] +; CHECK: %[[T09:.+]] = udiv nof i32 %[[T08]], %[[T07]] ; CHECK: %[[T10:.+]] = insertelement <2 x i32> %[[T05]], i32 %[[T09]], i32 1 ; CHECK: br label %[[CONT1]] ; CHECK: [[CONT1]]: @@ -239,14 +239,14 @@ ; UNROLL-NO-VF: br i1 {{.*}}, label %[[IF0:.+]], label %[[CONT0:.+]] ; UNROLL-NO-VF: [[IF0]]: ; UNROLL-NO-VF: %[[ADD0:.+]] = add nsw i32 %[[LOAD0]], %x -; UNROLL-NO-VF: %[[DIV0:.+]] = udiv i32 %[[LOAD0]], %[[ADD0]] +; UNROLL-NO-VF: %[[DIV0:.+]] = udiv nof i32 %[[LOAD0]], %[[ADD0]] ; UNROLL-NO-VF: br label %[[CONT0]] ; UNROLL-NO-VF: [[CONT0]]: ; UNROLL-NO-VF: phi i32 [ undef, %vector.body ], [ %[[DIV0]], %[[IF0]] ] ; UNROLL-NO-VF: br i1 {{.*}}, label %[[IF1:.+]], label %[[CONT1:.+]] ; UNROLL-NO-VF: [[IF1]]: ; UNROLL-NO-VF: %[[ADD1:.+]] = add nsw i32 %[[LOAD1]], %x -; UNROLL-NO-VF: %[[DIV1:.+]] = udiv i32 %[[LOAD1]], %[[ADD1]] +; UNROLL-NO-VF: %[[DIV1:.+]] = udiv nof i32 %[[LOAD1]], %[[ADD1]] ; UNROLL-NO-VF: br label %[[CONT1]] ; UNROLL-NO-VF: [[CONT1]]: ; 
UNROLL-NO-VF: phi i32 [ undef, %[[CONT0]] ], [ %[[DIV1]], %[[IF1]] ] @@ -261,7 +261,7 @@ if.then: %tmp3 = add nsw i32 %tmp2, %x - %tmp4 = udiv i32 %tmp2, %tmp3 + %tmp4 = udiv nof i32 %tmp2, %tmp3 br label %for.inc for.inc: Index: test/Transforms/LoopVectorize/if-pred-not-when-safe.ll =================================================================== --- test/Transforms/LoopVectorize/if-pred-not-when-safe.ll +++ test/Transforms/LoopVectorize/if-pred-not-when-safe.ll @@ -17,12 +17,12 @@ ; CHECK-LABEL: test ; CHECK: vector.body: -; CHECK: %{{.*}} = sdiv <2 x i32> %{{.*}}, <i32 11, i32 11> -; CHECK: %{{.*}} = udiv <2 x i32> %{{.*}}, <i32 13, i32 13> +; CHECK: %{{.*}} = sdiv nof <2 x i32> %{{.*}}, <i32 11, i32 11> +; CHECK: %{{.*}} = udiv nof <2 x i32> %{{.*}}, <i32 13, i32 13> ; CHECK: %{{.*}} = srem <2 x i32> %{{.*}}, <i32 17, i32 17> ; CHECK: %{{.*}} = urem <2 x i32> %{{.*}}, <i32 19, i32 19> -; CHECK-NOT: %{{.*}} = sdiv <2 x i32> %{{.*}}, <i32 0, i32 0> -; CHECK-NOT: %{{.*}} = udiv <2 x i32> %{{.*}}, <i32 0, i32 0> +; CHECK-NOT: %{{.*}} = sdiv nof <2 x i32> %{{.*}}, <i32 0, i32 0> +; CHECK-NOT: %{{.*}} = udiv nof <2 x i32> %{{.*}}, <i32 0, i32 0> ; CHECK-NOT: %{{.*}} = srem <2 x i32> %{{.*}}, <i32 0, i32 0> ; CHECK-NOT: %{{.*}} = urem <2 x i32> %{{.*}}, <i32 0, i32 0> @@ -56,12 +56,12 @@ br i1 %cmp1, label %if.then, label %if.end if.then: ; preds = %for.body - %rsd = sdiv i32 %psd, 11 - %rud = udiv i32 %pud, 13 + %rsd = sdiv nof i32 %psd, 11 + %rud = udiv nof i32 %pud, 13 %rsr = srem i32 %psr, 17 %rur = urem i32 %pur, 19 - %rsd0 = sdiv i32 %psd0, 0 - %rud0 = udiv i32 %pud0, 0 + %rsd0 = sdiv nof i32 %psd0, 0 + %rud0 = udiv nof i32 %pud0, 0 %rsr0 = srem i32 %psr0, 0 %rur0 = urem i32 %pur0, 0 br label %if.end Index: test/Transforms/LoopVectorize/induction.ll =================================================================== --- test/Transforms/LoopVectorize/induction.ll +++ test/Transforms/LoopVectorize/induction.ll @@ -297,7 +297,7 @@ ; PR30542. 
Ensure we generate all the scalar steps for the induction variable. ; The scalar induction variable is used by a getelementptr instruction -; (uniform), and a udiv (non-uniform). +; (uniform), and a udiv nof (non-uniform). ; ; int sum = 0; ; for (int i = 0; i < n; ++i) { @@ -313,10 +313,10 @@ ; CHECK: %[[I0:.+]] = add i32 %index, 0 ; CHECK: getelementptr inbounds i32, i32* %a, i32 %[[I0]] ; CHECK: pred.udiv.if: -; CHECK: udiv i32 {{.*}}, %[[I0]] +; CHECK: udiv nof i32 {{.*}}, %[[I0]] ; CHECK: pred.udiv.if{{[0-9]+}}: ; CHECK: %[[I1:.+]] = add i32 %index, 1 -; CHECK: udiv i32 {{.*}}, %[[I1]] +; CHECK: udiv nof i32 {{.*}}, %[[I1]] ; ; UNROLL-NO_IC-LABEL: @scalarize_induction_variable_05( ; UNROLL-NO-IC: vector.body: @@ -326,15 +326,15 @@ ; UNROLL-NO-IC: getelementptr inbounds i32, i32* %a, i32 %[[I0]] ; UNROLL-NO-IC: getelementptr inbounds i32, i32* %a, i32 %[[I2]] ; UNROLL-NO-IC: pred.udiv.if: -; UNROLL-NO-IC: udiv i32 {{.*}}, %[[I0]] +; UNROLL-NO-IC: udiv nof i32 {{.*}}, %[[I0]] ; UNROLL-NO-IC: pred.udiv.if{{[0-9]+}}: ; UNROLL-NO-IC: %[[I1:.+]] = add i32 %index, 1 -; UNROLL-NO-IC: udiv i32 {{.*}}, %[[I1]] +; UNROLL-NO-IC: udiv nof i32 {{.*}}, %[[I1]] ; UNROLL-NO-IC: pred.udiv.if{{[0-9]+}}: -; UNROLL-NO-IC: udiv i32 {{.*}}, %[[I2]] +; UNROLL-NO-IC: udiv nof i32 {{.*}}, %[[I2]] ; UNROLL-NO-IC: pred.udiv.if{{[0-9]+}}: ; UNROLL-NO-IC: %[[I3:.+]] = add i32 %index, 3 -; UNROLL-NO-IC: udiv i32 {{.*}}, %[[I3]] +; UNROLL-NO-IC: udiv nof i32 {{.*}}, %[[I3]] ; ; IND-LABEL: @scalarize_induction_variable_05( ; IND: vector.body: @@ -342,10 +342,10 @@ ; IND: %[[E0:.+]] = sext i32 %index to i64 ; IND: getelementptr inbounds i32, i32* %a, i64 %[[E0]] ; IND: pred.udiv.if: -; IND: udiv i32 {{.*}}, %index +; IND: udiv nof i32 {{.*}}, %index ; IND: pred.udiv.if{{[0-9]+}}: ; IND: %[[I1:.+]] = or i32 %index, 1 -; IND: udiv i32 {{.*}}, %[[I1]] +; IND: udiv nof i32 {{.*}}, %[[I1]] ; ; UNROLL-LABEL: @scalarize_induction_variable_05( ; UNROLL: vector.body: @@ -355,15 +355,15 @@ ; UNROLL: 
%[[G0:.+]] = getelementptr inbounds i32, i32* %a, i64 %[[E0]] ; UNROLL: getelementptr i32, i32* %[[G0]], i64 2 ; UNROLL: pred.udiv.if: -; UNROLL: udiv i32 {{.*}}, %index +; UNROLL: udiv nof i32 {{.*}}, %index ; UNROLL: pred.udiv.if{{[0-9]+}}: ; UNROLL: %[[I1:.+]] = or i32 %index, 1 -; UNROLL: udiv i32 {{.*}}, %[[I1]] +; UNROLL: udiv nof i32 {{.*}}, %[[I1]] ; UNROLL: pred.udiv.if{{[0-9]+}}: -; UNROLL: udiv i32 {{.*}}, %[[I2]] +; UNROLL: udiv nof i32 {{.*}}, %[[I2]] ; UNROLL: pred.udiv.if{{[0-9]+}}: ; UNROLL: %[[I3:.+]] = or i32 %index, 3 -; UNROLL: udiv i32 {{.*}}, %[[I3]] +; UNROLL: udiv nof i32 {{.*}}, %[[I3]] define i32 @scalarize_induction_variable_05(i32* %a, i32 %x, i1 %c, i32 %n) { entry: @@ -377,7 +377,7 @@ br i1 %c, label %if.then, label %if.end if.then: - %tmp2 = udiv i32 %tmp1, %i + %tmp2 = udiv nof i32 %tmp1, %i br label %if.end if.end: Index: test/Transforms/LoopVectorize/no_idiv_reduction.ll =================================================================== --- test/Transforms/LoopVectorize/no_idiv_reduction.ll +++ test/Transforms/LoopVectorize/no_idiv_reduction.ll @@ -8,12 +8,12 @@ for.body: ; CHECK-LABEL: @g( - ; CHECK-NOT: sdiv <2 x i32> + ; CHECK-NOT: sdiv nof <2 x i32> %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] %r.05 = phi i32 [ 80, %entry ], [ %div, %for.body ] %arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* @a, i64 0, i64 %indvars.iv %0 = load i32, i32* %arrayidx, align 4 - %div = sdiv i32 %r.05, %0 + %div = sdiv nof i32 %r.05, %0 %indvars.iv.next = add i64 %indvars.iv, 1 %lftr.wideiv = trunc i64 %indvars.iv.next to i32 %exitcond = icmp eq i32 %lftr.wideiv, 1024 Index: test/Transforms/LoopVectorize/pr33706.ll =================================================================== --- test/Transforms/LoopVectorize/pr33706.ll +++ test/Transforms/LoopVectorize/pr33706.ll @@ -20,7 +20,7 @@ br label %bb27 bb9: ; preds = %bb - %tmp10 = udiv i32 65536, %arg2 + %tmp10 = udiv nof i32 65536, %arg2 br label %bb11 bb11: ; 
preds = %bb11, %bb9 Index: test/Transforms/LoopVectorize/reduction-small-size.ll =================================================================== --- test/Transforms/LoopVectorize/reduction-small-size.ll +++ test/Transforms/LoopVectorize/reduction-small-size.ll @@ -24,7 +24,7 @@ br i1 %c, label %if.then, label %if.end if.then: - %tmp0 = sdiv i32 undef, undef + %tmp0 = sdiv nof i32 undef, undef br label %if.end if.end: Index: test/Transforms/NewGVN/calls-readonly.ll =================================================================== --- test/Transforms/NewGVN/calls-readonly.ll +++ test/Transforms/NewGVN/calls-readonly.ll @@ -11,7 +11,7 @@ br i1 %1, label %bb, label %bb1 bb: ; preds = %entry - %2 = sdiv i32 %x, %y ; <i32> [#uses=1] + %2 = sdiv nof i32 %x, %y ; <i32> [#uses=1] br label %bb1 bb1: ; preds = %bb, %entry @@ -30,7 +30,7 @@ ; CHECK-NEXT: %1 = icmp eq i32 %0, 0 ; CHECK-NEXT: br i1 %1, label %bb, label %bb1 ; CHECK: bb: -; CHECK-NEXT: %2 = sdiv i32 %x, %y +; CHECK-NEXT: %2 = sdiv nof i32 %x, %y ; CHECK-NEXT: br label %bb1 ; CHECK: bb1: ; CHECK-NEXT: %x_addr.0 = phi i32 [ %2, %bb ], [ %x, %entry ] Index: test/Transforms/NewGVN/pr32838.ll =================================================================== --- test/Transforms/NewGVN/pr32838.ll +++ test/Transforms/NewGVN/pr32838.ll @@ -20,7 +20,7 @@ ; CHECK: for.cond17thread-pre-split: ; CHECK-NEXT: br label [[COND_TRUE]] ; CHECK: cond.true: -; CHECK-NEXT: [[DIV]] = sdiv i64 [[ARG:%.*]], 4 +; CHECK-NEXT: [[DIV]] = sdiv nof i64 [[ARG:%.*]], 4 ; CHECK-NEXT: br label [[THIRDPHIBLOCK]] ; CHECK: temp: ; CHECK-NEXT: ret void @@ -42,7 +42,7 @@ br label %cond.true cond.true: %fourthphi = phi i64 [ %arg, %entry ], [ %firstphi, %for.cond17thread-pre-split ] - %div = sdiv i64 %fourthphi, 4 + %div = sdiv nof i64 %fourthphi, 4 br label %thirdphiblock temp: ret void @@ -66,7 +66,7 @@ ; CHECK-NEXT: br label [[COND_TRUE]] ; CHECK: cond.true: ; CHECK-NEXT: [[FOURTHPHI:%.*]] = phi i64 [ [[ARG:%.*]], [[ENTRY:%.*]] ], [ 
[[FIRSTPHI]], %for.cond17thread-pre-split ] -; CHECK-NEXT: [[DIV]] = sdiv i64 [[FOURTHPHI]], 4 +; CHECK-NEXT: [[DIV]] = sdiv nof i64 [[FOURTHPHI]], 4 ; CHECK-NEXT: br label [[THIRDPHIBLOCK]] ; CHECK: temp: ; CHECK-NEXT: ret void @@ -88,7 +88,7 @@ br label %cond.true cond.true: %fourthphi = phi i64 [ %arg, %entry ], [ %firstphi, %for.cond17thread-pre-split ] - %div = sdiv i64 %fourthphi, 4 + %div = sdiv nof i64 %fourthphi, 4 br label %thirdphiblock temp: ret void Index: test/Transforms/NewGVN/pr33185.ll =================================================================== --- test/Transforms/NewGVN/pr33185.ll +++ test/Transforms/NewGVN/pr33185.ll @@ -16,7 +16,7 @@ ; CHECK-NEXT: [[MUL_I:%.*]] = select i1 [[CMP1_I]], i32 [[F_08_I]], i32 0 ; CHECK-NEXT: br i1 [[TMP1]], label [[COND_END_I]], label [[COND_TRUE_I:%.*]] ; CHECK: cond.true.i: -; CHECK-NEXT: [[DIV_I:%.*]] = udiv i32 [[MUL_I]], [[F_08_I]] +; CHECK-NEXT: [[DIV_I:%.*]] = udiv nof i32 [[MUL_I]], [[F_08_I]] ; CHECK-NEXT: br label [[COND_END_I]] ; CHECK: cond.end.i: ; CHECK-NEXT: [[COND_I:%.*]] = phi i32 [ [[DIV_I]], [[COND_TRUE_I]] ], [ 0, [[FOR_BODY_I]] ] @@ -40,7 +40,7 @@ cond.true.i: ;; Ensure we don't replace this divide with a phi of ops that merges the wrong loop iteration value - %div.i = udiv i32 %mul.i, %f.08.i + %div.i = udiv nof i32 %mul.i, %f.08.i br label %cond.end.i cond.end.i: @@ -57,10 +57,10 @@ declare i32 @printf(i8* nocapture readonly, ...) -;; Variant of the above where we have made the udiv available in each predecessor with the wrong values. +;; Variant of the above where we have made the udiv nof available in each predecessor with the wrong values. ;; In the entry block, it is always 0, so we don't try to create a leader there, only in %cond.end.i. ;; We should not create a phi of ops for it using these leaders. -;; A correct phi of ops for this udiv would be phi(0, 1), which we are not smart enough to figure out. 
+;; A correct phi of ops for this udiv nof would be phi(0, 1), which we are not smart enough to figure out. ;; If we reuse the incorrect leaders, we will get phi(0, 0). define i32 @test2() local_unnamed_addr { ; CHECK-LABEL: @test2( @@ -74,7 +74,7 @@ ; CHECK-NEXT: [[MUL_I:%.*]] = select i1 [[CMP1_I]], i32 [[F_08_I]], i32 0 ; CHECK-NEXT: br i1 [[TMP1]], label [[COND_END_I]], label [[COND_TRUE_I:%.*]] ; CHECK: cond.true.i: -; CHECK-NEXT: [[DIV_I:%.*]] = udiv i32 [[MUL_I]], [[F_08_I]] +; CHECK-NEXT: [[DIV_I:%.*]] = udiv nof i32 [[MUL_I]], [[F_08_I]] ; CHECK-NEXT: br label [[COND_END_I]] ; CHECK: cond.end.i: ; CHECK-NEXT: [[COND_I:%.*]] = phi i32 [ [[DIV_I]], [[COND_TRUE_I]] ], [ 0, [[FOR_BODY_I]] ] @@ -99,13 +99,13 @@ cond.true.i: ;; Ensure we don't replace this divide with a phi of ops that merges the wrong loop iteration value - %div.i = udiv i32 %mul.i, %f.08.i + %div.i = udiv nof i32 %mul.i, %f.08.i br label %cond.end.i cond.end.i: %cond.i = phi i32 [ %div.i, %cond.true.i ], [ 0, %for.body.i ] %inc.i = add nuw nsw i32 %f.08.i, 1 - %test = udiv i32 %mul.i, %inc.i + %test = udiv nof i32 %mul.i, %inc.i %call5= tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str4, i64 0, i64 0), i32 %test) %exitcond.i = icmp eq i32 %inc.i, 4 br i1 %exitcond.i, label %fn1.exit, label %for.body.i Index: test/Transforms/PhaseOrdering/basic.ll =================================================================== --- test/Transforms/PhaseOrdering/basic.ll +++ test/Transforms/PhaseOrdering/basic.ll @@ -30,7 +30,7 @@ ; evolution can recognize it. 
define i32 @test2(i32 %a, i32* %p) nounwind uwtable ssp { entry: - %div = udiv i32 %a, 4 + %div = udiv nof i32 %a, 4 %arrayidx = getelementptr inbounds i32, i32* %p, i64 0 store i32 %div, i32* %arrayidx, align 4 %add = add i32 %div, %div Index: test/Transforms/PhaseOrdering/scev.ll =================================================================== --- test/Transforms/PhaseOrdering/scev.ll +++ test/Transforms/PhaseOrdering/scev.ll @@ -10,7 +10,7 @@ ; CHECK: --> {%p,+,(8 * (%d /u 4))} define void @test1(i64 %d, i32* %p) nounwind uwtable ssp { entry: - %div = udiv i64 %d, 4 + %div = udiv nof i64 %d, 4 br label %for.cond for.cond: ; preds = %for.inc, %entry @@ -39,7 +39,7 @@ ; CHECK: --> {%p,+,(8 * (%d /u 2))} define void @test1a(i64 %d, i32* %p) nounwind uwtable ssp { entry: - %div = udiv i64 %d, 2 + %div = udiv nof i64 %d, 2 br label %for.cond for.cond: ; preds = %for.inc, %entry Index: test/Transforms/PruneEH/seh-nounwind.ll =================================================================== --- test/Transforms/PruneEH/seh-nounwind.ll +++ test/Transforms/PruneEH/seh-nounwind.ll @@ -6,7 +6,7 @@ define i32 @div(i32 %n, i32 %d) nounwind { entry: - %div = sdiv i32 %n, %d + %div = sdiv nof i32 %n, %d ret i32 %div } Index: test/Transforms/SCCP/apint-basictest3.ll =================================================================== --- test/Transforms/SCCP/apint-basictest3.ll +++ test/Transforms/SCCP/apint-basictest3.ll @@ -13,7 +13,7 @@ %t3 = mul i128 %t2, -1 br label %BB3 BB2: - %f1 = udiv i128 -1, 1 + %f1 = udiv nof i128 -1, 1 %f2 = add i128 %f1, 1 %f3 = urem i128 %f2, 2121 br label %BB3 Index: test/Transforms/SCCP/overdefined-div.ll =================================================================== --- test/Transforms/SCCP/overdefined-div.ll +++ test/Transforms/SCCP/overdefined-div.ll @@ -6,27 +6,27 @@ ; CHECK-LABEL: test1 ; CHECK-NEXT: ret i32 0 define i32 @test1(i32 %foo) { - %tinkywinky = udiv i32 0, %foo + %tinkywinky = udiv nof i32 0, %foo ret i32 %tinkywinky } ; 
CHECK-LABEL: test2 ; CHECK-NEXT: ret i32 0 define i32 @test2(i32 %foo) { - %tinkywinky = sdiv i32 0, %foo + %tinkywinky = sdiv nof i32 0, %foo ret i32 %tinkywinky } ; CHECK-LABEL: test3 ; CHECK: ret i32 %tinkywinky define i32 @test3(i32 %foo) { - %tinkywinky = udiv i32 %foo, 0 + %tinkywinky = udiv nof i32 %foo, 0 ret i32 %tinkywinky } ; CHECK-LABEL: test4 ; CHECK: ret i32 %tinkywinky define i32 @test4(i32 %foo) { - %tinkywinky = sdiv i32 %foo, 0 + %tinkywinky = sdiv nof i32 %foo, 0 ret i32 %tinkywinky } Index: test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll =================================================================== --- test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll +++ test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll @@ -5,21 +5,21 @@ ; CHECK-LABEL: @test1 ; CHECK: load <4 x i32> ; CHECK: add nsw <4 x i32> -; CHECK: sdiv <4 x i32> +; CHECK: sdiv nof <4 x i32> define void @test1(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c) { entry: %0 = load i32, i32* %b, align 4 %1 = load i32, i32* %c, align 4 %add = add nsw i32 %1, %0 - %div = sdiv i32 %add, 2 + %div = sdiv nof i32 %add, 2 store i32 %div, i32* %a, align 4 %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1 %2 = load i32, i32* %arrayidx3, align 4 %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1 %3 = load i32, i32* %arrayidx4, align 4 %add5 = add nsw i32 %3, %2 - %div6 = sdiv i32 %add5, 2 + %div6 = sdiv nof i32 %add5, 2 %arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1 store i32 %div6, i32* %arrayidx7, align 4 %arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2 @@ -27,7 +27,7 @@ %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2 %5 = load i32, i32* %arrayidx9, align 4 %add10 = add nsw i32 %5, %4 - %div11 = sdiv i32 %add10, 2 + %div11 = sdiv nof i32 %add10, 2 %arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2 store i32 %div11, i32* %arrayidx12, align 4 %arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3 @@ 
-35,7 +35,7 @@ %arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3 %7 = load i32, i32* %arrayidx14, align 4 %add15 = add nsw i32 %7, %6 - %div16 = sdiv i32 %add15, 2 + %div16 = sdiv nof i32 %add15, 2 %arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3 store i32 %div16, i32* %arrayidx17, align 4 ret void Index: test/Transforms/SLPVectorizer/X86/blending-shuffle.ll =================================================================== --- test/Transforms/SLPVectorizer/X86/blending-shuffle.ll +++ test/Transforms/SLPVectorizer/X86/blending-shuffle.ll @@ -92,7 +92,7 @@ ; CHECK-NEXT: [[TMP5:%.*]] = add <2 x i8> [[TMP2]], [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i8> [[TMP5]], i32 0 ; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i8> [[TMP5]], i32 1 -; CHECK-NEXT: [[TMP8:%.*]] = sdiv i8 [[TMP6]], [[TMP7]] +; CHECK-NEXT: [[TMP8:%.*]] = sdiv nof i8 [[TMP6]], [[TMP7]] ; CHECK-NEXT: ret i8 [[TMP8]] ; %x0 = extractelement <4 x i8> %x, i32 0 @@ -105,7 +105,7 @@ %y2y2 = mul i8 %y2, %y2 %1 = add i8 %x0x0, %x3x3 %2 = add i8 %y1y1, %y2y2 - %3 = sdiv i8 %1, %2 + %3 = sdiv nof i8 %1, %2 ret i8 %3 } @@ -118,7 +118,7 @@ ; CHECK-NEXT: [[TMP5:%.*]] = add <2 x i8> [[TMP2]], [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i8> [[TMP5]], i32 0 ; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i8> [[TMP5]], i32 1 -; CHECK-NEXT: [[TMP8:%.*]] = sdiv i8 [[TMP6]], [[TMP7]] +; CHECK-NEXT: [[TMP8:%.*]] = sdiv nof i8 [[TMP6]], [[TMP7]] ; CHECK-NEXT: ret i8 [[TMP8]] ; %x0 = extractelement <4 x i8> %x, i32 0 @@ -131,7 +131,7 @@ %x2x2 = mul i8 %x2, %x2 %1 = add i8 %x0x0, %x3x3 %2 = add i8 %x1x1, %x2x2 - %3 = sdiv i8 %1, %2 + %3 = sdiv nof i8 %1, %2 ret i8 %3 } @@ -146,7 +146,7 @@ ; CHECK-NEXT: [[TMP5:%.*]] = add <2 x i8> [[TMP2]], [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i8> [[TMP5]], i32 0 ; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i8> [[TMP5]], i32 1 -; CHECK-NEXT: [[TMP8:%.*]] = sdiv i8 [[TMP6]], [[TMP7]] +; CHECK-NEXT: [[TMP8:%.*]] = sdiv nof 
i8 [[TMP6]], [[TMP7]] ; CHECK-NEXT: ret i8 [[TMP8]] ; %x0 = extractelement <4 x i8> %x, i32 0 @@ -161,6 +161,6 @@ %x2x2 = mul i8 %x2, %x2 %1 = add i8 %x0x0, %x3x3 %2 = add i8 %x1x1, %x2x2 - %3 = sdiv i8 %1, %2 + %3 = sdiv nof i8 %1, %2 ret i8 %3 } Index: test/Transforms/SLPVectorizer/X86/powof2div.ll =================================================================== --- test/Transforms/SLPVectorizer/X86/powof2div.ll +++ test/Transforms/SLPVectorizer/X86/powof2div.ll @@ -6,20 +6,20 @@ ;CHECK-LABEL: @powof2div( ;CHECK: load <4 x i32>, <4 x i32>* ;CHECK: add nsw <4 x i32> -;CHECK: sdiv <4 x i32> +;CHECK: sdiv nof <4 x i32> define void @powof2div(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32* noalias nocapture readonly %c){ entry: %0 = load i32, i32* %b, align 4 %1 = load i32, i32* %c, align 4 %add = add nsw i32 %1, %0 - %div = sdiv i32 %add, 2 + %div = sdiv nof i32 %add, 2 store i32 %div, i32* %a, align 4 %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1 %2 = load i32, i32* %arrayidx3, align 4 %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1 %3 = load i32, i32* %arrayidx4, align 4 %add5 = add nsw i32 %3, %2 - %div6 = sdiv i32 %add5, 2 + %div6 = sdiv nof i32 %add5, 2 %arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1 store i32 %div6, i32* %arrayidx7, align 4 %arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2 @@ -27,7 +27,7 @@ %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2 %5 = load i32, i32* %arrayidx9, align 4 %add10 = add nsw i32 %5, %4 - %div11 = sdiv i32 %add10, 2 + %div11 = sdiv nof i32 %add10, 2 %arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2 store i32 %div11, i32* %arrayidx12, align 4 %arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3 @@ -35,7 +35,7 @@ %arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3 %7 = load i32, i32* %arrayidx14, align 4 %add15 = add nsw i32 %7, %6 - %div16 = sdiv i32 %add15, 2 + %div16 = sdiv nof i32 %add15, 2 %arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3 
store i32 %div16, i32* %arrayidx17, align 4 ret void Index: test/Transforms/SampleProfile/cov-zero-samples.ll =================================================================== --- test/Transforms/SampleProfile/cov-zero-samples.ll +++ test/Transforms/SampleProfile/cov-zero-samples.ll @@ -62,7 +62,7 @@ if.end: ; preds = %if.then, %for.body %6 = load i64, i64* %i, align 8, !dbg !44 - %div = sdiv i64 %6, 239, !dbg !45 + %div = sdiv nof i64 %6, 239, !dbg !45 %7 = load i32, i32* %sum, align 4, !dbg !46 %conv2 = sext i32 %7 to i64, !dbg !46 %mul = mul nsw i64 %conv2, %div, !dbg !46 Index: test/Transforms/SampleProfile/fnptr.ll =================================================================== --- test/Transforms/SampleProfile/fnptr.ll +++ test/Transforms/SampleProfile/fnptr.ll @@ -97,7 +97,7 @@ br label %for.inc, !dbg !18 if.else: ; preds = %for.body3 - %div = sdiv i32 %j.023, 840, !dbg !19 + %div = sdiv nof i32 %j.023, 840, !dbg !19 %sub = sub nsw i32 %i.025, %div, !dbg !19 %call10 = tail call double %_Z3fooi._Z3bari(i32 %sub), !dbg !19 br label %for.inc Index: test/Transforms/SampleProfile/propagate.ll =================================================================== --- test/Transforms/SampleProfile/propagate.ll +++ test/Transforms/SampleProfile/propagate.ll @@ -82,7 +82,7 @@ for.body: ; preds = %for.cond %6 = load i64, i64* %i, align 8, !dbg !39 %7 = load i64, i64* %N.addr, align 8, !dbg !42 - %div = sdiv i64 %7, 3, !dbg !43 + %div = sdiv nof i64 %7, 3, !dbg !43 %cmp2 = icmp sgt i64 %6, %div, !dbg !44 br i1 %cmp2, label %if.then3, label %if.end, !dbg !45 ; CHECK: edge for.body -> if.then3 probability is 0x51292fa6 / 0x80000000 = 63.41% @@ -97,7 +97,7 @@ if.end: ; preds = %if.then3, %for.body %9 = load i64, i64* %i, align 8, !dbg !48 %10 = load i64, i64* %N.addr, align 8, !dbg !50 - %div4 = sdiv i64 %10, 4, !dbg !51 + %div4 = sdiv nof i64 %10, 4, !dbg !51 %cmp5 = icmp sgt i64 %9, %div4, !dbg !52 br i1 %cmp5, label %if.then6, label %if.else7, !dbg !53 ; CHECK: edge 
if.end -> if.then6 probability is 0x5d89d89e / 0x80000000 = 73.08% Index: test/Transforms/SimplifyCFG/2006-10-19-UncondDiv.ll =================================================================== --- test/Transforms/SimplifyCFG/2006-10-19-UncondDiv.ll +++ test/Transforms/SimplifyCFG/2006-10-19-UncondDiv.ll @@ -12,7 +12,7 @@ cond_true182: ; preds = %cond_false179 br label %cond_next185 cond_next185: ; preds = %cond_true182, %cond_false179 - %d0.3 = phi i32 [ udiv (i32 1, i32 ptrtoint (i32* @G to i32)), %cond_true182 ], [ %tmp, %cond_false179 ] ; <i32> [#uses=1] + %d0.3 = phi i32 [ udiv nof (i32 1, i32 ptrtoint (i32* @G to i32)), %cond_true182 ], [ %tmp, %cond_false179 ] ; <i32> [#uses=1] ret i32 %d0.3 } @@ -23,7 +23,7 @@ cond_true182: ; preds = %cond_false179 br label %cond_next185 cond_next185: ; preds = %cond_true182, %cond_false179 - %d0.3 = phi i32 [ udiv (i32 1, i32 ptrtoint (i32* @G to i32)), %cond_true182 ], [ %tmp, %cond_false179 ] ; <i32> [#uses=1] + %d0.3 = phi i32 [ udiv nof (i32 1, i32 ptrtoint (i32* @G to i32)), %cond_true182 ], [ %tmp, %cond_false179 ] ; <i32> [#uses=1] call i32 @test( i32 4 ) ; <i32>:0 [#uses=0] ret i32 %d0.3 } Index: test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll =================================================================== --- test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll +++ test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll @@ -15,7 +15,7 @@ br i1 %toBool, label %cond_true, label %cond_next cond_true: ; preds = %entry - %tmp7 = udiv i32 %tmp, %Z ; <i32> [#uses=1] + %tmp7 = udiv nof i32 %tmp, %Z ; <i32> [#uses=1] br label %cond_next cond_next: ; preds = %cond_true, %entry Index: test/Transforms/SimplifyCFG/ConditionalTrappingConstantExpr.ll =================================================================== --- test/Transforms/SimplifyCFG/ConditionalTrappingConstantExpr.ll +++ test/Transforms/SimplifyCFG/ConditionalTrappingConstantExpr.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: [[C:%.*]] = icmp sle i32 %a, %b ; 
CHECK-NEXT: br i1 [[C]], label %bb2, label %bb1 ; CHECK: bb1: -; CHECK-NEXT: [[D:%.*]] = icmp sgt i32 sdiv (i32 -32768, i32 ptrtoint (i32* @G to i32)), 0 +; CHECK-NEXT: [[D:%.*]] = icmp sgt i32 sdiv nof (i32 -32768, i32 ptrtoint (i32* @G to i32)), 0 ; CHECK-NEXT: [[DOT:%.*]] = select i1 [[D]], i32 927, i32 42 ; CHECK-NEXT: br label %bb2 ; CHECK: bb2: @@ -21,7 +21,7 @@ %c = icmp sle i32 %a, %b br i1 %c, label %bb2, label %bb1 bb1: - %d = icmp sgt i32 sdiv (i32 -32768, i32 ptrtoint (i32* @G to i32)), 0 + %d = icmp sgt i32 sdiv nof (i32 -32768, i32 ptrtoint (i32* @G to i32)), 0 br i1 %d, label %bb6, label %bb2 bb2: ret i32 42 @@ -33,7 +33,7 @@ ; CHECK-LABEL: @ackbar( ; CHECK-NEXT: br i1 %c, label %bb5, label %bb6 ; CHECK: bb5: -; CHECK-NEXT: [[DOT:%.*]] = select i1 icmp sgt (i32 sdiv (i32 32767, i32 ptrtoint (i32* @G to i32)), i32 0), i32 42, i32 927 +; CHECK-NEXT: [[DOT:%.*]] = select i1 icmp sgt (i32 sdiv nof (i32 32767, i32 ptrtoint (i32* @G to i32)), i32 0), i32 42, i32 927 ; CHECK-NEXT: br label %bb6 ; CHECK: bb6: ; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ 42, %0 ], [ [[DOT]], %bb5 ] @@ -41,7 +41,7 @@ ; br i1 %c, label %bb5, label %bb6 bb5: - br i1 icmp sgt (i32 sdiv (i32 32767, i32 ptrtoint (i32* @G to i32)), i32 0), label %bb6, label %bb7 + br i1 icmp sgt (i32 sdiv nof (i32 32767, i32 ptrtoint (i32* @G to i32)), i32 0), label %bb6, label %bb7 bb6: ret i32 42 bb7: Index: test/Transforms/SimplifyCFG/div-rem-pairs.ll =================================================================== --- test/Transforms/SimplifyCFG/div-rem-pairs.ll +++ test/Transforms/SimplifyCFG/div-rem-pairs.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[REM]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i32 %a, %b ; CHECK-NEXT: br label %end ; CHECK: end: ; CHECK-NEXT: [[RET:%.*]] = phi i32 [ [[DIV]], %if ], [ 3, %entry ] @@ -23,7 +23,7 @@ br i1 %cmp, label %if, label %end if: 
- %div = sdiv i32 %a, %b + %div = sdiv nof i32 %a, %b br label %end end: @@ -38,7 +38,7 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[REM]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: -; CHECK-NEXT: [[DIV:%.*]] = udiv i64 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i64 %a, %b ; CHECK-NEXT: br label %end ; CHECK: end: ; CHECK-NEXT: [[RET:%.*]] = phi i64 [ [[DIV]], %if ], [ 3, %entry ] @@ -50,7 +50,7 @@ br i1 %cmp, label %if, label %end if: - %div = udiv i64 %a, %b + %div = udiv nof i64 %a, %b br label %end end: @@ -61,7 +61,7 @@ define i16 @hoist_srem(i16 %a, i16 %b) { ; CHECK-LABEL: @hoist_srem( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[DIV:%.*]] = sdiv i16 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = sdiv nof i16 %a, %b ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i16 [[DIV]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: @@ -72,7 +72,7 @@ ; CHECK-NEXT: ret i16 [[RET]] ; entry: - %div = sdiv i16 %a, %b + %div = sdiv nof i16 %a, %b %cmp = icmp eq i16 %div, 42 br i1 %cmp, label %if, label %end @@ -88,7 +88,7 @@ define i8 @hoist_urem(i8 %a, i8 %b) { ; CHECK-LABEL: @hoist_urem( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[DIV:%.*]] = udiv i8 %a, %b +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i8 %a, %b ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[DIV]], 42 ; CHECK-NEXT: br i1 [[CMP]], label %if, label %end ; CHECK: if: @@ -99,7 +99,7 @@ ; CHECK-NEXT: ret i8 [[RET]] ; entry: - %div = udiv i8 %a, %b + %div = udiv nof i8 %a, %b %cmp = icmp eq i8 %div, 42 br i1 %cmp, label %if, label %end Index: test/Transforms/SimplifyCFG/multiple-phis.ll =================================================================== --- test/Transforms/SimplifyCFG/multiple-phis.ll +++ test/Transforms/SimplifyCFG/multiple-phis.ll @@ -16,7 +16,7 @@ ; CHECK-NEXT: br i1 [[CMP]], label [[WHILE_BODY]], label [[WHILE_END:%.*]] ; CHECK: while.body: ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[LOW_0]], [[HIGH_ADDR_0]] -; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 2 +; CHECK-NEXT: [[DIV:%.*]] = udiv nof i32 
[[ADD]], 2 ; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[DIV]] to i64 ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[R:%.*]], i64 [[IDXPROM]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]] @@ -39,7 +39,7 @@ while.body: ; preds = %while.cond %add = add i32 %low.0, %high.addr.0 - %div = udiv i32 %add, 2 + %div = udiv nof i32 %add, 2 %idxprom = zext i32 %div to i64 %arrayidx = getelementptr inbounds i32, i32* %r, i64 %idxprom %0 = load i32, i32* %arrayidx Index: test/Transforms/SimplifyCFG/seh-nounwind.ll =================================================================== --- test/Transforms/SimplifyCFG/seh-nounwind.ll +++ test/Transforms/SimplifyCFG/seh-nounwind.ll @@ -6,7 +6,7 @@ define i32 @div(i32 %n, i32 %d) nounwind { entry: - %div = sdiv i32 %n, %d + %div = sdiv nof i32 %n, %d ret i32 %div } Index: test/Transforms/Util/PredicateInfo/pr33456.ll =================================================================== --- test/Transforms/Util/PredicateInfo/pr33456.ll +++ test/Transforms/Util/PredicateInfo/pr33456.ll @@ -20,7 +20,7 @@ ; CHECK-NEXT: br i1 [[TMP8]], label [[TMP9]], label [[TMP9]] ; CHECK: [[DOT0:%.*]] = phi i32 [ [[TMP4]], [[TMP7]] ], [ [[TMP4]], [[TMP7]] ], [ [[DOT1:%.*]], [[TMP13]] ], [ [[TMP4]], [[TMP3]] ] ; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* @b, align 4 -; CHECK-NEXT: [[TMP11:%.*]] = sdiv i32 [[TMP10]], [[DOT0]] +; CHECK-NEXT: [[TMP11:%.*]] = sdiv nof i32 [[TMP10]], [[DOT0]] ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[TMP11]], 0 ; CHECK-NEXT: br i1 [[TMP12]], label [[TMP13]], label [[TMP13]] ; CHECK: [[DOT1]] = phi i32 [ [[DOT0]], [[TMP9]] ], [ [[DOT0]], [[TMP9]] ], [ undef, [[TMP0:%.*]] ] @@ -46,7 +46,7 @@ ; <label>:9: ; preds = %13, %7, %7, %3 %.0 = phi i32 [ %4, %7 ], [ %4, %7 ], [ %.1, %13 ], [ %4, %3 ] %10 = load i32, i32* @b, align 4 - %11 = sdiv i32 %10, %.0 + %11 = sdiv nof i32 %10, %.0 %12 = icmp eq i32 %11, 0 br i1 %12, label %13, label %13 Index: unittests/IR/ConstantsTest.cpp 
=================================================================== --- unittests/IR/ConstantsTest.cpp +++ unittests/IR/ConstantsTest.cpp @@ -242,8 +242,8 @@ CHECK(ConstantExpr::getFSub(P1, P1), "fsub float " P1STR ", " P1STR); CHECK(ConstantExpr::getMul(P0, P0), "mul i32 " P0STR ", " P0STR); CHECK(ConstantExpr::getFMul(P1, P1), "fmul float " P1STR ", " P1STR); - CHECK(ConstantExpr::getUDiv(P0, P0), "udiv i32 " P0STR ", " P0STR); - CHECK(ConstantExpr::getSDiv(P0, P0), "sdiv i32 " P0STR ", " P0STR); + CHECK(ConstantExpr::getUDiv(P0, P0), "udiv nof i32 " P0STR ", " P0STR); + CHECK(ConstantExpr::getSDiv(P0, P0), "sdiv nof i32 " P0STR ", " P0STR); CHECK(ConstantExpr::getFDiv(P1, P1), "fdiv float " P1STR ", " P1STR); CHECK(ConstantExpr::getURem(P0, P0), "urem i32 " P0STR ", " P0STR); CHECK(ConstantExpr::getSRem(P0, P0), "srem i32 " P0STR ", " P0STR); @@ -267,7 +267,8 @@ CHECK(ConstantExpr::getFPExtend(P1, DoubleTy), "fpext float " P1STR " to double"); - CHECK(ConstantExpr::getExactUDiv(P0, P0), "udiv exact i32 " P0STR ", " P0STR); + CHECK(ConstantExpr::getExactUDiv(P0, P0), + "udiv exact nof i32 " P0STR ", " P0STR); CHECK(ConstantExpr::getSelect(P3, P0, P4), "select i1 " P3STR ", i32 " P0STR ", i32 " P4STR);