Index: lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- lib/Transforms/Vectorize/LoopVectorize.cpp
+++ lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1611,6 +1611,16 @@
   unsigned getNumLoads() const { return LAI->getNumLoads(); }
   unsigned getNumPredStores() const { return NumPredStores; }
 
+  /// Returns true if \p I is a memory instruction that has a consecutive or
+  /// consecutive-like pointer operand. Consecutive-like pointers are pointers
+  /// that are treated like consecutive pointers during vectorization. The
+  /// pointer operands of interleaved accesses are an example.
+  bool hasConsecutiveLikePtrOperand(Instruction *I);
+
+  /// Returns true if \p I is a memory instruction that may be scalarized
+  /// during vectorization.
+  bool memoryInstructionMayBeScalarized(Instruction *I);
+
 private:
   /// Check if a single basic block loop is vectorizable.
   /// At this point we know that this is a loop with a constant trip count
@@ -2244,11 +2254,29 @@
   assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() &&
          "Val and Step should have the same integer type");
 
+  auto scalarUserIsUniform = [&](User *U) -> bool {
+    auto *I = cast<Instruction>(U);
+    if (!OrigLoop->contains(I))
+      return true;
+    if (!Legal->isScalarAfterVectorization(I))
+      return true;
+    return Legal->isUniformAfterVectorization(I);
+  };
+
+  // Determine the number of scalars we need to generate for each unroll
+  // iteration. If EntryVal is uniform or all its scalar users are uniform, we
+  // only need to generate the first lane. Otherwise, we generate all VF
+  // values.
+  unsigned Lanes = VF;
+  if (Legal->isUniformAfterVectorization(cast<Instruction>(EntryVal)) ||
+      all_of(EntryVal->users(), scalarUserIsUniform))
+    Lanes = 1;
+
   // Compute the scalar steps and save the results in VectorLoopValueMap.
   ScalarParts Entry(UF);
   for (unsigned Part = 0; Part < UF; ++Part) {
     Entry[Part].resize(VF);
-    for (unsigned Lane = 0; Lane < VF; ++Lane) {
+    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
       auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + Lane);
       auto *Mul = Builder.CreateMul(StartIdx, Step);
       auto *Add = Builder.CreateAdd(ScalarIV, Mul);
@@ -2420,6 +2448,9 @@
     return U;
   }
 
+  assert(Lane > 0 ? !Legal->isUniformAfterVectorization(cast<Instruction>(V))
+                  : true && "Uniform values only have lane zero");
+
   // Otherwise, the value from the original loop has been vectorized and is
   // represented by UF vector values. Extract and return the requested scalar
   // value from the appropriate vector lane.
@@ -2926,11 +2957,16 @@
                                  Instr->getParent());
   }
 
+  // Determine the number of scalars we need to generate for each unroll
+  // iteration. If the instruction is uniform, we only need to generate the
+  // first lane. Otherwise, we generate all VF values.
+  unsigned Lanes = Legal->isUniformAfterVectorization(Instr) ? 1 : VF;
+
   // For each vector unroll 'part':
   for (unsigned Part = 0; Part < UF; ++Part) {
     Entry[Part].resize(VF);
     // For each scalar that we create:
-    for (unsigned Width = 0; Width < VF; ++Width) {
+    for (unsigned Width = 0; Width < Lanes; ++Width) {
 
       // Start if-block.
       Value *Cmp = nullptr;
@@ -4510,6 +4546,14 @@
 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
   // For each instruction in the old loop.
   for (Instruction &I : *BB) {
+
+    // Scalarize instructions that should remain scalar after vectorization.
+    if (!(isa<BranchInst>(&I) || isa<PHINode>(&I)) &&
+        Legal->isScalarAfterVectorization(&I)) {
+      scalarizeInstruction(&I);
+      continue;
+    }
+
     switch (I.getOpcode()) {
     case Instruction::Br:
       // Nothing to do for PHIs and BR, since we already took care of the
@@ -5260,6 +5304,44 @@
   }
 }
 
+bool LoopVectorizationLegality::hasConsecutiveLikePtrOperand(Instruction *I) {
+  if (isAccessInterleaved(I) && !getInterleavedAccessGroup(I)->isReverse())
+    return true;
+  if (auto *Ptr = getPointerOperand(I))
+    return isConsecutivePtr(Ptr);
+  return false;
+}
+
+bool LoopVectorizationLegality::memoryInstructionMayBeScalarized(
+    Instruction *I) {
+
+  // If the instruction doesn't have a consecutive or consecutive-like pointer
+  // operand, it may be scalarized.
+  if (!hasConsecutiveLikePtrOperand(I))
+    return true;
+
+  // Get and ensure we have a valid memory instruction.
+  LoadInst *LI = dyn_cast<LoadInst>(I);
+  StoreInst *SI = dyn_cast<StoreInst>(I);
+  assert((LI || SI) && "Invalid memory instruction");
+
+  // If the instruction is a store located in a predicated block, it will be
+  // scalarized.
+  if (SI && blockNeedsPredication(SI->getParent()) && !isMaskRequired(SI))
+    return true;
+
+  // If the instruction's allocated type size doesn't equal its stored type
+  // size, it requires padding and may be scalarized.
+  auto &DL = I->getModule()->getDataLayout();
+  auto *ScalarType = LI ? LI->getType() : SI->getValueOperand()->getType();
+  if (DL.getTypeAllocSize(ScalarType) != DL.getTypeStoreSize(ScalarType))
+    return true;
+
+  // Otherwise, the memory instruction should be vectorized if the rest of the
+  // loop is.
+  return false;
+}
+
 void LoopVectorizationLegality::collectLoopUniforms() {
   // We now know that the loop is vectorizable!
   // Collect instructions inside the loop that will remain uniform after
@@ -5281,21 +5363,40 @@
     DEBUG(dbgs() << "LV: Found uniform instruction: " << *Cmp << "\n");
   }
 
-  // Add all consecutive pointer values; these values will be uniform after
-  // vectorization (and subsequent cleanup). Although non-consecutive, we also
-  // add the pointer operands of interleaved accesses since they are treated
-  // like consecutive pointers during vectorization.
+  // Holds consecutive and consecutive-like pointers. Consecutive-like pointers
+  // are pointers that are treated like consecutive pointers during
+  // vectorization. The pointer operands of interleaved accesses are an
+  // example.
+  SmallPtrSet<Instruction *, 8> ConsecutiveLikePtrs;
+
+  // Holds pointer operands of instructions that are possibly non-uniform.
+  SmallPtrSet<Instruction *, 8> PossibleNonUniformPtrs;
+
+  // Iterate over the instructions in the loop, and collect all consecutive and
+  // consecutive-like pointer operands in ConsecutiveLikePtrs. If it's possible
+  // that a pointer operand use will be scalarized, we collect the pointer
+  // operand in PossibleNonUniformPtrs instead. We use two sets here because a
+  // single getelementptr instruction can be used by both vectorized and
+  // scalarized memory instructions. For example, if a loop loads and stores
+  // from the same location, but the store is conditional, the store will be
+  // scalarized, and the getelementptr won't remain uniform.
   for (auto *BB : TheLoop->blocks())
     for (auto &I : *BB) {
-      Instruction *Ptr = nullptr;
-      if (I.getType()->isPointerTy() && isConsecutivePtr(&I))
-        Ptr = &I;
-      else if (isAccessInterleaved(&I))
-        Ptr = cast<Instruction>(getPointerOperand(&I));
-      else
+      auto *Ptr = dyn_cast_or_null<Instruction>(getPointerOperand(&I));
+      if (!Ptr)
         continue;
-      Worklist.insert(Ptr);
-      DEBUG(dbgs() << "LV: Found uniform instruction: " << *Ptr << "\n");
+      if (memoryInstructionMayBeScalarized(&I))
+        PossibleNonUniformPtrs.insert(Ptr);
+      else
+        ConsecutiveLikePtrs.insert(Ptr);
+    }
+
+  // Add to the Worklist all consecutive and consecutive-like pointers that
+  // aren't also identified as possibly non-uniform.
+  for (auto *V : ConsecutiveLikePtrs)
+    if (!PossibleNonUniformPtrs.count(V)) {
+      DEBUG(dbgs() << "LV: Found uniform instruction: " << *V << "\n");
+      Worklist.insert(V);
     }
 
   // Expand Worklist in topological order: whenever a new instruction
Index: test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll
===================================================================
--- test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll
+++ test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll
@@ -1,5 +1,6 @@
 ; RUN: opt < %s -loop-vectorize -S | FileCheck %s
 
+; CHECK: vector.body:
 ; CHECK: fadd
 ; CHECK-NEXT: fadd
 ; CHECK-NEXT: fadd
@@ -12,9 +13,8 @@
 ; CHECK-NEXT: fadd
 ; CHECK-NEXT: fadd
 ; CHECK-NEXT: fadd
-; CHECK-NEXT: =
 ; CHECK-NOT: fadd
-; CHECK-SAME: >
+; CHECK: middle.block
 
 target datalayout = "e-m:e-i64:64-n32:64"
 target triple = "powerpc64le-ibm-linux-gnu"
Index: test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
===================================================================
--- test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
+++ test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
@@ -43,7 +43,7 @@
 
 ; CHECK-LABEL: @s173
 ; CHECK: load <4 x float>, <4 x float>*
-; CHECK: add i64 %index, 16000
+; CHECK: add nsw i64 %index, 16000
 ; CHECK: ret i32 0
 }
Index: test/Transforms/LoopVectorize/global_alias.ll
===================================================================
--- test/Transforms/LoopVectorize/global_alias.ll
+++ test/Transforms/LoopVectorize/global_alias.ll
@@ -387,7 +387,7 @@
 ;   return Foo.A[a];
 ; }
 ; CHECK-LABEL: define i32 @noAlias08(
-; CHECK: sub <4 x i32>
+; CHECK: load <4 x i32>
 ; CHECK: ret
 
 define i32 @noAlias08(i32 %a) #0 {
@@ -439,7 +439,7 @@
 ;   return Foo.A[a];
 ; }
 ; CHECK-LABEL: define i32 @noAlias09(
-; CHECK: sub <4 x i32>
+; CHECK: load <4 x i32>
 ; CHECK: ret
 
 define i32 @noAlias09(i32 %a) #0 {
@@ -721,7 +721,7 @@
 ;   return Foo.A[a];
 ; }
 ; CHECK-LABEL: define i32 @noAlias14(
-; CHECK: sub <4 x i32>
+; CHECK: load <4 x i32>
 ; CHECK: ret
 
 define i32 @noAlias14(i32 %a) #0 {
Index: test/Transforms/LoopVectorize/induction.ll
===================================================================
--- test/Transforms/LoopVectorize/induction.ll
+++ test/Transforms/LoopVectorize/induction.ll
@@ -78,21 +78,15 @@
 ; CHECK: vector.body:
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %[[i0:.+]] = add i64 %index, 0
-; CHECK: %[[i1:.+]] = add i64 %index, 1
 ; CHECK: getelementptr inbounds i64, i64* %a, i64 %[[i0]]
-; CHECK: getelementptr inbounds i64, i64* %a, i64 %[[i1]]
 ;
 ; UNROLL-NO-IC-LABEL: @scalarize_induction_variable_01(
 ; UNROLL-NO-IC: vector.body:
 ; UNROLL-NO-IC: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; UNROLL-NO-IC: %[[i0:.+]] = add i64 %index, 0
-; UNROLL-NO-IC: %[[i1:.+]] = add i64 %index, 1
 ; UNROLL-NO-IC: %[[i2:.+]] = add i64 %index, 2
-; UNROLL-NO-IC: %[[i3:.+]] = add i64 %index, 3
 ; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i0]]
-; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i1]]
 ; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i2]]
-; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i3]]
 ;
 ; IND-LABEL: @scalarize_induction_variable_01(
 ; IND: vector.body:
@@ -611,9 +605,7 @@
 ; CHECK: %vec.ind = phi <2 x i32> [ %[[START]], %vector.ph ], [ %vec.ind.next, %vector.body ]
 ; CHECK: %offset.idx = add i32 %i, %index
 ; CHECK: %[[A1:.*]] = add i32 %offset.idx, 0
-; CHECK: %[[A2:.*]] = add i32 %offset.idx, 1
 ; CHECK: %[[G1:.*]] = getelementptr inbounds i32, i32* %a, i32 %[[A1]]
-; CHECK: %[[G2:.*]] = getelementptr inbounds i32, i32* %a, i32 %[[A2]]
 ; CHECK: %[[G3:.*]] = getelementptr i32, i32* %[[G1]], i32 0
 ; CHECK: %[[B1:.*]] = bitcast i32* %[[G3]] to <2 x i32>*
 ; CHECK: store <2 x i32> %vec.ind, <2 x i32>* %[[B1]]
Index: test/Transforms/LoopVectorize/induction_plus.ll
===================================================================
--- test/Transforms/LoopVectorize/induction_plus.ll
+++ test/Transforms/LoopVectorize/induction_plus.ll
@@ -9,7 +9,9 @@
 ;CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ;CHECK: %vec.ind = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
 ;CHECK: %vec.ind1 = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next2, %vector.body ]
-;CHECK: add nsw <4 x i64> %vec.ind, <i64 12, i64 12, i64 12, i64 12>
+;CHECK: %[[T1:.+]] = add i64 %index, 0
+;CHECK: %[[T2:.+]] = add nsw i64 %[[T1]], 12
+;CHECK: getelementptr inbounds [1024 x i32], [1024 x i32]* @array, i64 0, i64 %[[T2]]
 ;CHECK: %vec.ind.next = add <4 x i64> %vec.ind, <i64 4, i64 4, i64 4, i64 4>
 ;CHECK: %vec.ind.next2 = add <4 x i32> %vec.ind1, <i32 4, i32 4, i32 4, i32 4>
 ;CHECK: ret i32
Index: test/Transforms/LoopVectorize/reverse_induction.ll
===================================================================
--- test/Transforms/LoopVectorize/reverse_induction.ll
+++ test/Transforms/LoopVectorize/reverse_induction.ll
@@ -8,13 +8,7 @@
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i64 %startval, %index
 ; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
 
 define i32 @reverse_induction_i64(i64 %startval, i32 * %ptr) {
 entry:
@@ -40,13 +34,7 @@
 ; CHECK: %index = phi i128 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i128 %startval, %index
 ; CHECK: %[[a0:.+]] = add i128 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i128 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i128 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i128 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i128 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i128 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i128 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i128 %offset.idx, -7
 
 define i32 @reverse_induction_i128(i128 %startval, i32 * %ptr) {
 entry:
@@ -72,13 +60,7 @@
 ; CHECK: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i16 %startval, {{.*}}
 ; CHECK: %[[a0:.+]] = add i16 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i16 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i16 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i16 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i16 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i16 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i16 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i16 %offset.idx, -7
 
 define i32 @reverse_induction_i16(i16 %startval, i32 * %ptr) {
 entry:
@@ -121,13 +103,7 @@
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i64 1023, %index
 ; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
 
 define void @reverse_forward_induction_i64_i8() {
 entry:
@@ -153,13 +129,7 @@
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i64 1023, %index
 ; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
 
 define void @reverse_forward_induction_i64_i8_signed() {
 entry: