+/// Given a non-constant dependence-distance \p Dist between two accesses,
+ // (If so, then we have proven (**) because |dist| >= dist) + const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct); + if (SE.isKnownPositive(Minus)) + return true; + + // Second try: Is (-Dist > takenCount * stepInBytes) ? + // (If so, then we have proven (**) because |dist| >= -1*dist) + const SCEV *NegDist = SE.getNegativeSCEV(CastedDist); + Minus = SE.getMinusSCEV(NegDist, CastedProduct); + if (SE.isKnownPositive(Minus)) + return true; + + return false; +} + /// \brief Check the dependence for two accesses with the same stride \p Stride. /// \p Distance is the positive distance and \p TypeByteSize is type size in /// bytes. @@ -1292,21 +1341,26 @@ return Dependence::Unknown; } + Type *ATy = APtr->getType()->getPointerElementType(); + Type *BTy = BPtr->getType()->getPointerElementType(); + auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout(); + uint64_t TypeByteSize = DL.getTypeAllocSize(ATy); + uint64_t Stride = std::abs(StrideAPtr); const SCEVConstant *C = dyn_cast(Dist); if (!C) { + if (TypeByteSize == DL.getTypeAllocSize(BTy) && + isSafeDependenceDistance(DL, *(PSE.getSE()), + *(PSE.getBackedgeTakenCount()), *Dist, Stride, + TypeByteSize)) + return Dependence::NoDep; + DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n"); ShouldRetryWithRuntimeCheck = true; return Dependence::Unknown; } - Type *ATy = APtr->getType()->getPointerElementType(); - Type *BTy = BPtr->getType()->getPointerElementType(); - auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout(); - uint64_t TypeByteSize = DL.getTypeAllocSize(ATy); - const APInt &Val = C->getAPInt(); int64_t Distance = Val.getSExtValue(); - uint64_t Stride = std::abs(StrideAPtr); // Attempt to prove strided accesses independent. 
if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy && Index: llvm/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll =================================================================== --- llvm/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll +++ llvm/test/Analysis/LoopAccessAnalysis/multiple-strides-rt-memory-checks.ll @@ -13,9 +13,9 @@ ; int v3[Z][Z]; ; } s; ; -; void slow_function (s* const obj) { +; void slow_function (s* const obj, int z) { ; for (int j=0; jv1[k] + obj->v2[j]; ; obj->v3[j][k] += x; ; } @@ -35,7 +35,7 @@ %struct.s = type { [32 x i32], [32 x i32], [32 x [32 x i32]] } -define void @Test(%struct.s* nocapture %obj) #0 { +define void @Test(%struct.s* nocapture %obj, i64 %z) #0 { br label %.outer.preheader @@ -63,6 +63,6 @@ %8 = add nsw i32 %5, %7 store i32 %8, i32* %6 %j.next = add nuw nsw i64 %j, 1 - %exitcond.inner = icmp eq i64 %j.next, 32 + %exitcond.inner = icmp eq i64 %j.next, %z br i1 %exitcond.inner, label %.outer, label %.inner } Index: llvm/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll =================================================================== --- llvm/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll +++ llvm/test/Transforms/LoopVectorize/multiple-strides-vectorization.ll @@ -13,9 +13,9 @@ ; int v3[Z][Z]; ; } s; ; -; void slow_function (s* const obj) { +; void slow_function (s* const obj, int z) { ; for (int j=0; jv1[k] + obj->v2[j]; ; obj->v3[j][k] += x; ; } @@ -31,7 +31,7 @@ %struct.s = type { [32 x i32], [32 x i32], [32 x [32 x i32]] } -define void @Test(%struct.s* nocapture %obj) #0 { +define void @Test(%struct.s* nocapture %obj, i64 %z) #0 { br label %.outer.preheader @@ -59,6 +59,6 @@ %8 = add nsw i32 %5, %7 store i32 %8, i32* %6 %j.next = add nuw nsw i64 %j, 1 - %exitcond.inner = icmp eq i64 %j.next, 32 + %exitcond.inner = icmp eq i64 %j.next, %z br i1 %exitcond.inner, label %.outer, label %.inner } Index: 
+; Check that the compile-time-unknown dependence-distance is resolved
+ +; #include +; class Complex { +; private: +; float real_; +; float imaginary_; +; +; public: +; Complex() : real_(0), imaginary_(0) { } +; Complex(float real, float imaginary) : real_(real), imaginary_(imaginary) { } +; Complex(const Complex &rhs) : real_(rhs.real()), imaginary_(rhs.imaginary()) { } +; +; inline float real() const { return real_; } +; inline float imaginary() const { return imaginary_; } +; +; Complex operator+(const Complex& rhs) const +; { +; return Complex(real_ + rhs.real_, imaginary_ + rhs.imaginary_); +; } +; +; Complex operator-(const Complex& rhs) const +; { +; return Complex(real_ - rhs.real_, imaginary_ - rhs.imaginary_); +; } +; }; +; +; void Test(Complex *out, size_t size) +; { +; size_t D = size / 2; +; for (size_t offset = 0; offset < D; ++offset) +; { +; Complex t0 = out[offset]; +; Complex t1 = out[offset + D]; +; out[offset] = t1 + t0; +; out[offset + D] = t0 - t1; +; } +; } + +; CHECK-LABEL: Test +; CHECK: LAA: No unsafe dependent memory operations in loop. We don't need runtime memory checks. 
+; CHECK: vector.body: +; CHECK: <4 x i32> + +%class.Complex = type { float, float } + +define void @Test(%class.Complex* nocapture %out, i64 %size) local_unnamed_addr { +entry: + %div = lshr i64 %size, 1 + %cmp47 = icmp eq i64 %div, 0 + br i1 %cmp47, label %for.cond.cleanup, label %for.body.preheader + +for.body.preheader: + br label %for.body + +for.cond.cleanup.loopexit: + br label %for.cond.cleanup + +for.cond.cleanup: + ret void + +for.body: + %offset.048 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ] + %0 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 0 + %1 = load float, float* %0, align 4 + %imaginary_.i.i = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %offset.048, i32 1 + %2 = load float, float* %imaginary_.i.i, align 4 + %add = add nuw i64 %offset.048, %div + %3 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 0 + %4 = load float, float* %3, align 4 + %imaginary_.i.i28 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add, i32 1 + %5 = load float, float* %imaginary_.i.i28, align 4 + %add.i = fadd fast float %4, %1 + %add4.i = fadd fast float %5, %2 + store float %add.i, float* %0, align 4 + store float %add4.i, float* %imaginary_.i.i, align 4 + %sub.i = fsub fast float %1, %4 + %sub4.i = fsub fast float %2, %5 + store float %sub.i, float* %3, align 4 + store float %sub4.i, float* %imaginary_.i.i28, align 4 + %inc = add nuw nsw i64 %offset.048, 1 + %exitcond = icmp eq i64 %inc, %div + br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body +}