Index: lib/Transforms/Vectorize/LoopVectorize.cpp =================================================================== --- lib/Transforms/Vectorize/LoopVectorize.cpp +++ lib/Transforms/Vectorize/LoopVectorize.cpp @@ -4893,6 +4893,19 @@ StrideSet.insert(Stride); } +/// \brief Check if the load/store instruction \p I may be translated into +/// gather/scatter during vectorization. +/// +/// Pointer \p Ptr specifies address in memory for the given scalar memory +/// instruction. We need it to retrieve data type. +/// Using gather/scatter is possible when it is supported by target. +static bool isGatherOrScatterLegal(Instruction *I, Value *Ptr, +                                   LoopVectorizationLegality *Legal) { +  Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); +  return (isa<LoadInst>(I) && Legal->isLegalMaskedGather(DataTy)) || +         (isa<StoreInst>(I) && Legal->isLegalMaskedScatter(DataTy)); +} + void LoopVectorizationLegality::collectLoopUniforms() { // We now know that the loop is vectorizable! // Collect variables that will remain uniform after vectorization. @@ -4902,15 +4915,24 @@ // Start with the conditional branch and walk up the block. Worklist.push_back(Latch->getTerminator()->getOperand(0)); - // Also add all consecutive pointer values; these values will be uniform + // Also add all consecutive pointer values and all nonconsecutive pointer + // values if gather/scatter is not supported; these values will be uniform // after vectorization (and subsequent cleanup) and, until revectorization is // supported, all dependencies must also be uniform. 
for (Loop::block_iterator B = TheLoop->block_begin(), BE = TheLoop->block_end(); - B != BE; ++B) - for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end(); I != IE; ++I) - if (I->getType()->isPointerTy() && isConsecutivePtr(&*I)) - Worklist.insert(Worklist.end(), I->op_begin(), I->op_end()); + B != BE; ++B) { + for (BasicBlock::iterator I = (*B)->begin(), IE = (*B)->end(); I != IE; + ++I) { + LoadInst *LI = dyn_cast<LoadInst>(&*I); + StoreInst *SI = dyn_cast<StoreInst>(&*I); + if (!LI && !SI) + continue; + Value *Ptr = SI ? SI->getPointerOperand() : LI->getPointerOperand(); + if (isConsecutivePtr(Ptr) || !isGatherOrScatterLegal(&*I, Ptr, this)) + Worklist.push_back(Ptr); + } + } while (!Worklist.empty()) { Instruction *I = dyn_cast<Instruction>(Worklist.back()); @@ -5794,19 +5816,6 @@ return Cost; } -/// \brief Check if the load/store instruction \p I may be translated into -/// gather/scatter during vectorization. -/// -/// Pointer \p Ptr specifies address in memory for the given scalar memory -/// instruction. We need it to retrieve data type. -/// Using gather/scatter is possible when it is supported by target. -static bool isGatherOrScatterLegal(Instruction *I, Value *Ptr, -                                   LoopVectorizationLegality *Legal) { -  Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType(); -  return (isa<LoadInst>(I) && Legal->isLegalMaskedGather(DataTy)) || -         (isa<StoreInst>(I) && Legal->isLegalMaskedScatter(DataTy)); -} - /// \brief Check whether the address computation for a non-consecutive memory /// access looks like an unlikely candidate for being merged into the indexing /// mode. @@ -6228,30 +6237,17 @@ VecValuesToIgnore.insert(Casts.begin(), Casts.end()); } - // Ignore induction phis that are only used in either GetElementPtr or ICmp - // instruction to exit loop. Induction variables usually have large types and - // can have big impact when estimating register usage. - // This is for when VF > 1. 
+ // Ignore induction phis that are only used in uniform instructions + // since we don't need vector versions for such induction vars. Induction + // variables usually have large types so it is important not to exaggerate + // their register usages. This is for when VF > 1. for (auto &Induction : *Legal->getInductionVars()) { auto *PN = Induction.first; - auto *UpdateV = PN->getIncomingValueForBlock(TheLoop->getLoopLatch()); - // Check that the PHI is only used by the induction increment (UpdateV) or - // by GEPs. Then check that UpdateV is only used by a compare instruction or - // the loop header PHI. - // FIXME: Need precise def-use analysis to determine if this instruction - // variable will be vectorized. - if (std::all_of(PN->user_begin(), PN->user_end(), - [&](const User *U) -> bool { - return U == UpdateV || isa<GetElementPtrInst>(U); - }) && - std::all_of(UpdateV->user_begin(), UpdateV->user_end(), - [&](const User *U) -> bool { - return U == PN || isa<ICmpInst>(U); - })) { + if (std::all_of(PN->user_begin(), PN->user_end(), [&](User *U) -> bool { + return Legal->isUniformAfterVectorization(cast<Instruction>(U)); + })) VecValuesToIgnore.insert(PN); - VecValuesToIgnore.insert(UpdateV); - } } // Ignore instructions that will not be vectorized. @@ -6259,31 +6255,9 @@ for (auto bb = TheLoop->block_begin(), be = TheLoop->block_end(); bb != be; ++bb) { for (auto &Inst : **bb) { - switch (Inst.getOpcode()) - case Instruction::GetElementPtr: { - // Ignore GEP if its last operand is an induction variable so that it is - // a consecutive load/store and won't be vectorized as scatter/gather - // pattern. - - GetElementPtrInst *Gep = cast<GetElementPtrInst>(&Inst); - unsigned NumOperands = Gep->getNumOperands(); - unsigned InductionOperand = getGEPInductionOperand(Gep); - bool GepToIgnore = true; - - // Check that all of the gep indices are uniform except for the - // induction operand. 
- for (unsigned i = 0; i != NumOperands; ++i) { - if (i != InductionOperand && - !PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), - TheLoop)) { - GepToIgnore = false; - break; - } - } - - if (GepToIgnore) - VecValuesToIgnore.insert(&Inst); - break; + if (Legal->isUniformAfterVectorization(&Inst)) { + VecValuesToIgnore.insert(&Inst); + continue; } } } Index: test/Transforms/LoopVectorize/X86/reg-usage.ll =================================================================== --- test/Transforms/LoopVectorize/X86/reg-usage.ll +++ test/Transforms/LoopVectorize/X86/reg-usage.ll @@ -1,9 +1,7 @@ -; RUN: opt < %s -debug-only=loop-vectorize -loop-vectorize -vectorizer-maximize-bandwidth -O2 -S 2>&1 | FileCheck %s +; RUN: opt < %s -debug-only=loop-vectorize -loop-vectorize -vectorizer-maximize-bandwidth -O2 -mtriple=x86_64-unknown-linux -S 2>&1 | FileCheck %s +; RUN: opt < %s -debug-only=loop-vectorize -loop-vectorize -vectorizer-maximize-bandwidth -O2 -mtriple=x86_64-unknown-linux -mattr=+avx512f -S 2>&1 | FileCheck %s --check-prefix=AVX512F ; REQUIRES: asserts -target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" -target triple = "x86_64-unknown-linux-gnu" - @a = global [1024 x i8] zeroinitializer, align 16 @b = global [1024 x i8] zeroinitializer, align 16 @@ -45,6 +43,42 @@ br i1 %exitcond, label %for.cond.cleanup, label %for.body } +define i32 @goo() { +; CHECK-LABEL: goo +; CHECK: LV(REG): VF = 4 +; CHECK-NEXT: LV(REG): Found max usage: 4 +; CHECK: LV(REG): VF = 8 +; CHECK-NEXT: LV(REG): Found max usage: 7 +; CHECK: LV(REG): VF = 16 +; CHECK-NEXT: LV(REG): Found max usage: 13 +entry: + br label %for.body + +for.cond.cleanup: ; preds = %for.body + %add.lcssa = phi i32 [ %add, %for.body ] + ret i32 %add.lcssa + +for.body: ; preds = %for.body, %entry + %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] + %s.015 = phi i32 [ 0, 
%entry ], [ %add, %for.body ] + %tmp1 = add nsw i64 %indvars.iv, 3 + %arrayidx = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %tmp1 + %tmp = load i8, i8* %arrayidx, align 1 + %conv = zext i8 %tmp to i32 + %tmp2 = add nsw i64 %indvars.iv, 2 + %arrayidx2 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %tmp2 + %tmp3 = load i8, i8* %arrayidx2, align 1 + %conv3 = zext i8 %tmp3 to i32 + %sub = sub nsw i32 %conv, %conv3 + %ispos = icmp sgt i32 %sub, -1 + %neg = sub nsw i32 0, %sub + %tmp4 = select i1 %ispos, i32 %sub, i32 %neg + %add = add nsw i32 %tmp4, %s.015 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %exitcond = icmp eq i64 %indvars.iv.next, 1024 + br i1 %exitcond, label %for.cond.cleanup, label %for.body +} + define i64 @bar(i64* nocapture %a) { ; CHECK-LABEL: bar ; CHECK: LV(REG): VF = 2 @@ -69,3 +103,31 @@ %exitcond = icmp eq i64 %inc, 1024 br i1 %exitcond, label %for.cond.cleanup, label %for.body } + +@d = external global [0 x i64], align 8 +@e = external global [0 x i32], align 4 +@c = external global [0 x i32], align 4 + +define void @hoo(i32 %n) { +; AVX512F-LABEL: hoo +; AVX512F: LV(REG): VF = 16 +; AVX512F: LV(REG): Found max usage: 2 +; +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] + %arrayidx = getelementptr inbounds [0 x i64], [0 x i64]* @d, i64 0, i64 %indvars.iv + %tmp = load i64, i64* %arrayidx, align 8 + %arrayidx1 = getelementptr inbounds [0 x i32], [0 x i32]* @e, i64 0, i64 %tmp + %tmp1 = load i32, i32* %arrayidx1, align 4 + %arrayidx3 = getelementptr inbounds [0 x i32], [0 x i32]* @c, i64 0, i64 %indvars.iv + store i32 %tmp1, i32* %arrayidx3, align 4 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %exitcond = icmp eq i64 %indvars.iv.next, 10000 + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body + ret void +}