Index: lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- lib/Transforms/Vectorize/LoopVectorize.cpp
+++ lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5393,9 +5393,17 @@
       if (!Ptr)
         continue;
 
+      // True if all users of the pointer operand are memory accesses.
+      auto UsersAreMemAccesses = all_of(Ptr->users(), [&](User *U) -> bool {
+        return isa<LoadInst>(U) || isa<StoreInst>(U);
+      });
+
       // Ensure the memory instruction will not be scalarized, making its
-      // pointer operand non-uniform.
-      if (memoryInstructionMustBeScalarized(&I))
+      // pointer operand non-uniform. If the pointer operand is used by some
+      // instruction other than a memory access, we're not going to check if
+      // that other instruction may be scalarized here. Thus, conservatively
+      // assume the pointer operand may be non-uniform.
+      if (!UsersAreMemAccesses || memoryInstructionMustBeScalarized(&I))
         PossibleNonUniformPtrs.insert(Ptr);
 
       // If the memory instruction will be vectorized and its pointer operand
@@ -5433,30 +5441,48 @@
     }
   }
 
+  // Returns true if Ptr is the pointer operand of a memory access instruction
+  // I, and I is known to not require scalarization.
+  auto isVectorizedMemAccessUse = [&](Instruction *I, Value *Ptr) -> bool {
+    return getPointerOperand(I) == Ptr && !memoryInstructionMustBeScalarized(I);
+  };
+
   // For an instruction to be added into Worklist above, all its users inside
-  // the current loop should be already added into Worklist. This condition
-  // cannot be true for phi instructions which is always in a dependence loop.
-  // Because any instruction in the dependence cycle always depends on others
-  // in the cycle to be added into Worklist first, the result is no ones in
-  // the cycle will be added into Worklist in the end.
-  // That is why we process PHI separately.
-  for (auto &Induction : *getInductionVars()) {
-    auto *PN = Induction.first;
-    auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch());
-    if (all_of(PN->users(),
-               [&](User *U) -> bool {
-                 return U == UpdateV || isOutOfScope(U) ||
-                        Worklist.count(cast<Instruction>(U));
-               }) &&
-        all_of(UpdateV->users(), [&](User *U) -> bool {
-          return U == PN || isOutOfScope(U) ||
-                 Worklist.count(cast<Instruction>(U));
-        })) {
-      Worklist.insert(cast<Instruction>(PN));
-      Worklist.insert(cast<Instruction>(UpdateV));
-      DEBUG(dbgs() << "LV: Found uniform instruction: " << *PN << "\n");
-      DEBUG(dbgs() << "LV: Found uniform instruction: " << *UpdateV << "\n");
-    }
+  // the loop should also be in Worklist. However, this condition cannot be
+  // true for phi nodes that form a cyclic dependence. We must process phi
+  // nodes separately. The code below handles both pointer and non-pointer
+  // induction variables. An induction variable will remain uniform if all
+  // users of the induction variable and induction variable update remain
+  // uniform.
+  for (auto &Induction : Inductions) {
+    auto *Ind = Induction.first;
+    auto *IndUpdate = cast<Instruction>(Ind->getIncomingValueForBlock(Latch));
+
+    // Determine if all users of the induction variable are uniform after
+    // vectorization.
+    auto UniformInd = all_of(Ind->users(), [&](User *U) -> bool {
+      auto *I = cast<Instruction>(U);
+      return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) ||
+             isVectorizedMemAccessUse(I, Ind);
+    });
+    if (!UniformInd)
+      continue;
+
+    // Determine if all users of the induction variable update instruction are
+    // uniform after vectorization.
+    auto UniformIndUpdate = all_of(IndUpdate->users(), [&](User *U) -> bool {
+      auto *I = cast<Instruction>(U);
+      return I == Ind || !TheLoop->contains(I) || Worklist.count(I) ||
+             isVectorizedMemAccessUse(I, IndUpdate);
+    });
+    if (!UniformIndUpdate)
+      continue;
+
+    // The induction variable and its update instruction will remain uniform.
+    Worklist.insert(Ind);
+    Worklist.insert(IndUpdate);
+    DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ind << "\n");
+    DEBUG(dbgs() << "LV: Found uniform instruction: " << *IndUpdate << "\n");
   }
 
   Uniforms.insert(Worklist.begin(), Worklist.end());
Index: test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
===================================================================
--- test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
+++ test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll
@@ -269,3 +269,120 @@
 for.end:
   ret void
 }
+
+; CHECK-LABEL: pointer_iv_uniform
+;
+; Check that a pointer induction variable is recognized as uniform and remains
+; uniform after vectorization.
+;
+; CHECK: LV: Found uniform instruction: %p = phi i32* [ %tmp03, %for.body ], [ %a, %entry ]
+; CHECK: vector.body
+; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK-NOT: getelementptr
+; CHECK: %next.gep = getelementptr i32, i32* %a, i64 %index
+; CHECK-NOT: getelementptr
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @pointer_iv_uniform(i32* %a, i32 %x, i64 %n) {
+entry:
+  br label %for.body
+
+for.body:
+  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+  %p = phi i32* [ %tmp03, %for.body ], [ %a, %entry ]
+  store i32 %x, i32* %p, align 8
+  %tmp03 = getelementptr inbounds i32, i32* %p, i32 1
+  %i.next = add nuw nsw i64 %i, 1
+  %cond = icmp slt i64 %i.next, %n
+  br i1 %cond, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
+; INTER-LABEL: pointer_iv_non_uniform_0
+;
+; Check that a pointer induction variable with a non-uniform user is not
+; recognized as uniform and is not uniform after vectorization. The pointer
+; induction variable is used by getelementptr instructions that are non-uniform
+; due to scalarization of the stores.
+;
+; INTER-NOT: LV: Found uniform instruction: %p = phi i32* [ %tmp03, %for.body ], [ %a, %entry ]
+; INTER: vector.body
+; INTER: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; INTER: %[[I0:.+]] = shl i64 %index, 2
+; INTER: %next.gep = getelementptr i32, i32* %a, i64 %[[I0]]
+; INTER: %[[S1:.+]] = shl i64 %index, 2
+; INTER: %[[I1:.+]] = or i64 %[[S1]], 4
+; INTER: %next.gep2 = getelementptr i32, i32* %a, i64 %[[I1]]
+; INTER: %[[S2:.+]] = shl i64 %index, 2
+; INTER: %[[I2:.+]] = or i64 %[[S2]], 8
+; INTER: %next.gep3 = getelementptr i32, i32* %a, i64 %[[I2]]
+; INTER: %[[S3:.+]] = shl i64 %index, 2
+; INTER: %[[I3:.+]] = or i64 %[[S3]], 12
+; INTER: %next.gep4 = getelementptr i32, i32* %a, i64 %[[I3]]
+; INTER: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @pointer_iv_non_uniform_0(i32* %a, i64 %n) {
+entry:
+  br label %for.body
+
+for.body:
+  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+  %p = phi i32* [ %tmp03, %for.body ], [ %a, %entry ]
+  %tmp00 = load i32, i32* %p, align 8
+  %tmp01 = getelementptr inbounds i32, i32* %p, i32 1
+  %tmp02 = load i32, i32* %tmp01, align 8
+  %tmp03 = getelementptr inbounds i32, i32* %p, i32 4
+  %tmp04 = load i32, i32* %tmp03, align 8
+  %tmp05 = getelementptr inbounds i32, i32* %p, i32 5
+  %tmp06 = load i32, i32* %tmp05, align 8
+  %tmp07 = sub i32 %tmp04, %tmp00
+  %tmp08 = sub i32 %tmp02, %tmp02
+  %tmp09 = getelementptr inbounds i32, i32* %p, i32 2
+  store i32 %tmp07, i32* %tmp09, align 8
+  %tmp10 = getelementptr inbounds i32, i32* %p, i32 3
+  store i32 %tmp08, i32* %tmp10, align 8
+  %i.next = add nuw nsw i64 %i, 1
+  %cond = icmp slt i64 %i.next, %n
+  br i1 %cond, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
+; CHECK-LABEL: pointer_iv_non_uniform_1
+;
+; Check that a pointer induction variable with a non-uniform user is not
+; recognized as uniform and is not uniform after vectorization. The pointer
+; induction variable is used by a store that will be scalarized.
+;
+; CHECK-NOT: LV: Found uniform instruction: %p = phi x86_fp80* [%tmp1, %for.body], [%a, %entry]
+; CHECK: vector.body
+; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK: %next.gep = getelementptr x86_fp80, x86_fp80* %a, i64 %index
+; CHECK: %[[I1:.+]] = or i64 %index, 1
+; CHECK: %next.gep2 = getelementptr x86_fp80, x86_fp80* %a, i64 %[[I1]]
+; CHECK: %[[I2:.+]] = or i64 %index, 2
+; CHECK: %next.gep3 = getelementptr x86_fp80, x86_fp80* %a, i64 %[[I2]]
+; CHECK: %[[I3:.+]] = or i64 %index, 3
+; CHECK: %next.gep4 = getelementptr x86_fp80, x86_fp80* %a, i64 %[[I3]]
+; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body
+;
+define void @pointer_iv_non_uniform_1(x86_fp80* %a, i64 %n) {
+entry:
+  br label %for.body
+
+for.body:
+  %i = phi i64 [ %i.next, %for.body ], [ 0, %entry ]
+  %p = phi x86_fp80* [%tmp1, %for.body], [%a, %entry]
+  %tmp0 = sitofp i32 1 to x86_fp80
+  store x86_fp80 %tmp0, x86_fp80* %p, align 16
+  %tmp1 = getelementptr inbounds x86_fp80, x86_fp80* %p, i32 1
+  %i.next = add i64 %i, 1
+  %cond = icmp slt i64 %i.next, %n
+  br i1 %cond, label %for.body, label %for.end
+
+for.end:
+  ret void
+}