Index: llvm/lib/Analysis/Delinearization.cpp
===================================================================
--- llvm/lib/Analysis/Delinearization.cpp
+++ llvm/lib/Analysis/Delinearization.cpp
@@ -78,6 +78,9 @@
         break;
       AccessFn = SE->getMinusSCEV(AccessFn, BasePointer);
 
+      if (isa<SCEVCouldNotCompute>(AccessFn))
+        break;
+
       O << "\n";
       O << "Inst:" << Inst << "\n";
       O << "In Loop with Header: " << L->getHeader()->getName() << "\n";
Index: llvm/lib/Analysis/LoopAccessAnalysis.cpp
===================================================================
--- llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1500,6 +1500,9 @@
     return Dependence::Unknown;
   }
 
+  if (isa<SCEVCouldNotCompute>(Dist))
+    return Dependence::Unknown;
+
   Type *ATy = APtr->getType()->getPointerElementType();
   Type *BTy = BPtr->getType()->getPointerElementType();
   auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
Index: llvm/lib/Analysis/ScalarEvolution.cpp
===================================================================
--- llvm/lib/Analysis/ScalarEvolution.cpp
+++ llvm/lib/Analysis/ScalarEvolution.cpp
@@ -4131,6 +4131,10 @@
   if (LHS == RHS)
     return getZero(LHS->getType());
 
+  // Force-exercise all the failing paths.
+  if (LHS->getType()->isPointerTy() && RHS->getType()->isPointerTy())
+    return getCouldNotCompute();
+
   // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
   // makes it so that we cannot make much use of NUW.
   auto AddFlags = SCEV::FlagAnyWrap;
@@ -5327,7 +5331,8 @@
       Flags = setFlags(Flags, SCEV::FlagNW);
 
       const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
-      if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
+      const SCEV *Diff = getMinusSCEV(getSCEV(GEP), Ptr);
+      if (!isa<SCEVCouldNotCompute>(Diff) && isKnownPositive(Diff))
         Flags = setFlags(Flags, SCEV::FlagNUW);
     }
 
@@ -10059,10 +10064,13 @@
   if (Pred == CmpInst::ICMP_EQ)
     return false;
 
-  if (Pred == CmpInst::ICMP_NE)
-    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
-           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
-           isKnownNonZero(getMinusSCEV(LHS, RHS));
+  if (Pred == CmpInst::ICMP_NE) {
+    if (CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
+        CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)))
+      return true;
+    auto *Diff = getMinusSCEV(LHS, RHS);
+    return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
+  }
 
   if (CmpInst::isSigned(Pred))
     return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
Index: llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ llvm/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -911,6 +911,8 @@
   // Check that the first root is evenly spaced.
   unsigned N = DRS.Roots.size() + 1;
   const SCEV *StepSCEV = SE->getMinusSCEV(SE->getSCEV(DRS.Roots[0]), ADR);
+  if (isa<SCEVCouldNotCompute>(StepSCEV))
+    return false;
   const SCEV *ScaleSCEV = SE->getConstant(StepSCEV->getType(), N);
   if (ADR->getStepRecurrence(*SE) != SE->getMulExpr(StepSCEV, ScaleSCEV))
     return false;
Index: llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -2962,7 +2962,7 @@
     // The increment must be loop-invariant so it can be kept in a register.
     const SCEV *PrevExpr = SE.getSCEV(PrevIV);
     const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
-    if (!SE.isLoopInvariant(IncExpr, L))
+    if (isa<SCEVCouldNotCompute>(IncExpr) || !SE.isLoopInvariant(IncExpr, L))
       continue;
 
     if (Chain.isProfitableIncrement(OperExpr, IncExpr, SE)) {
@@ -3315,13 +3315,16 @@
 
         // x == y  -->  x - y == 0
         const SCEV *N = SE.getSCEV(NV);
+#if 0
         if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) {
           // S is normalized, so normalize N before folding it into S
          // to keep the result normalized.
           N = normalizeForPostIncUse(N, TmpPostIncLoops, SE);
           Kind = LSRUse::ICmpZero;
           S = SE.getMinusSCEV(N, S);
+          // TODO: Handle this case!
         }
+#endif
 
         // -1 and the negations of all interesting strides (except the negation
         // of -1) are now also interesting.
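
Every hunk above applies the same guard: once getMinusSCEV can return SCEVCouldNotCompute (here deliberately forced for all pointer-pointer subtractions by the debugging check added to getMinusSCEV itself), each caller must test the result before handing it to another SCEV query, because predicates like isKnownPositive(), isKnownNonZero(), and isLoopInvariant() are not meaningful on SCEVCouldNotCompute. Below is a minimal sketch of that pattern, assuming the in-tree ScalarEvolution API; the helper name isKnownPositiveDifference is illustrative and not part of the patch:

#include "llvm/Analysis/ScalarEvolution.h"
using namespace llvm;

// Hypothetical helper showing the guard each call site adopts: bail out
// conservatively when the subtraction could not be computed, and only then
// ask further SCEV questions about the difference.
static bool isKnownPositiveDifference(ScalarEvolution &SE, const SCEV *LHS,
                                      const SCEV *RHS) {
  const SCEV *Diff = SE.getMinusSCEV(LHS, RHS);
  if (isa<SCEVCouldNotCompute>(Diff))
    return false; // No information; give the conservative answer.
  return SE.isKnownPositive(Diff);
}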