Index: lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- lib/Transforms/Vectorize/LoopVectorize.cpp
+++ lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2260,11 +2260,27 @@
   assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() &&
          "Val and Step should have the same integer type");
 
+  auto scalarUserIsUniform = [&](User *U) -> bool {
+    auto *I = cast<Instruction>(U);
+    return !OrigLoop->contains(I) || !Legal->isScalarAfterVectorization(I) ||
+           Legal->isUniformAfterVectorization(I);
+  };
+
+  // Determine the number of scalars we need to generate for each unroll
+  // iteration. If EntryVal is uniform or all its scalar users are uniform, we
+  // only need to generate the first lane. Otherwise, we generate all VF
+  // values. We are essentially determining if the induction variable has no
+  // "multi-scalar" (non-uniform scalar) users.
+  unsigned Lanes = VF;
+  if (Legal->isUniformAfterVectorization(cast<Instruction>(EntryVal)) ||
+      all_of(EntryVal->users(), scalarUserIsUniform))
+    Lanes = 1;
+
   // Compute the scalar steps and save the results in VectorLoopValueMap.
   ScalarParts Entry(UF);
   for (unsigned Part = 0; Part < UF; ++Part) {
     Entry[Part].resize(VF);
-    for (unsigned Lane = 0; Lane < VF; ++Lane) {
+    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
       auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + Lane);
       auto *Mul = Builder.CreateMul(StartIdx, Step);
       auto *Add = Builder.CreateAdd(ScalarIV, Mul);
@@ -2447,6 +2463,9 @@
     return U;
   }
 
+  assert(Lane > 0 ? !Legal->isUniformAfterVectorization(cast<Instruction>(V))
+                  : true && "Uniform values only have lane zero");
+
   // Otherwise, the value from the original loop has been vectorized and is
   // represented by UF vector values. Extract and return the requested scalar
   // value from the appropriate vector lane.
@@ -2959,11 +2978,16 @@
   if (IfPredicateInstr)
     Cond = createBlockInMask(Instr->getParent());
 
+  // Determine the number of scalars we need to generate for each unroll
+  // iteration. If the instruction is uniform, we only need to generate the
+  // first lane. Otherwise, we generate all VF values.
+  unsigned Lanes = Legal->isUniformAfterVectorization(Instr) ? 1 : VF;
+
   // For each vector unroll 'part':
   for (unsigned Part = 0; Part < UF; ++Part) {
     Entry[Part].resize(VF);
     // For each scalar that we create:
-    for (unsigned Width = 0; Width < VF; ++Width) {
+    for (unsigned Width = 0; Width < Lanes; ++Width) {
 
       // Start if-block.
       Value *Cmp = nullptr;
@@ -4537,6 +4561,15 @@
 void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
   // For each instruction in the old loop.
   for (Instruction &I : *BB) {
+
+    // Scalarize instructions that should remain scalar after vectorization.
+    if (!(isa<BranchInst>(&I) || isa<PHINode>(&I) ||
+          isa<DbgInfoIntrinsic>(&I)) &&
+        Legal->isScalarAfterVectorization(&I)) {
+      scalarizeInstruction(&I);
+      continue;
+    }
+
     switch (I.getOpcode()) {
     case Instruction::Br:
       // Nothing to do for PHIs and BR, since we already took care of the
Index: test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll
===================================================================
--- test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll
+++ test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll
@@ -1,5 +1,6 @@
 ; RUN: opt < %s -loop-vectorize -S | FileCheck %s
 
+; CHECK: vector.body:
 ; CHECK: fadd
 ; CHECK-NEXT: fadd
 ; CHECK-NEXT: fadd
@@ -12,9 +13,8 @@
 ; CHECK-NEXT: fadd
 ; CHECK-NEXT: fadd
 ; CHECK-NEXT: fadd
-; CHECK-NEXT: =
 ; CHECK-NOT: fadd
-; CHECK-SAME: >
+; CHECK: middle.block
 
 target datalayout = "e-m:e-i64:64-n32:64"
 target triple = "powerpc64le-ibm-linux-gnu"
Index: test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
===================================================================
--- test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
+++ test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
@@ -43,7 +43,7 @@
 
 ; CHECK-LABEL: @s173
 ; CHECK: load <4 x float>, <4 x float>*
-; CHECK: add i64 %index, 16000
+; CHECK: add nsw i64 %index, 16000
 ; CHECK: ret i32 0
 }
 
Index: test/Transforms/LoopVectorize/global_alias.ll
===================================================================
--- test/Transforms/LoopVectorize/global_alias.ll
+++ test/Transforms/LoopVectorize/global_alias.ll
@@ -387,7 +387,7 @@
 ;   return Foo.A[a];
 ; }
 ; CHECK-LABEL: define i32 @noAlias08(
-; CHECK: sub <4 x i32>
+; CHECK: load <4 x i32>
 ; CHECK: ret
 
 define i32 @noAlias08(i32 %a) #0 {
@@ -439,7 +439,7 @@
 ;   return Foo.A[a];
 ; }
 ; CHECK-LABEL: define i32 @noAlias09(
-; CHECK: sub <4 x i32>
+; CHECK: load <4 x i32>
 ; CHECK: ret
 
 define i32 @noAlias09(i32 %a) #0 {
@@ -721,7 +721,7 @@
 ;   return Foo.A[a];
 ; }
 ; CHECK-LABEL: define i32 @noAlias14(
-; CHECK: sub <4 x i32>
+; CHECK: load <4 x i32>
 ; CHECK: ret
 
 define i32 @noAlias14(i32 %a) #0 {
Index: test/Transforms/LoopVectorize/induction.ll
===================================================================
--- test/Transforms/LoopVectorize/induction.ll
+++ test/Transforms/LoopVectorize/induction.ll
@@ -78,21 +78,15 @@
 ; CHECK: vector.body:
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %[[i0:.+]] = add i64 %index, 0
-; CHECK: %[[i1:.+]] = add i64 %index, 1
 ; CHECK: getelementptr inbounds i64, i64* %a, i64 %[[i0]]
-; CHECK: getelementptr inbounds i64, i64* %a, i64 %[[i1]]
 ;
 ; UNROLL-NO-IC-LABEL: @scalarize_induction_variable_01(
 ; UNROLL-NO-IC: vector.body:
 ; UNROLL-NO-IC: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; UNROLL-NO-IC: %[[i0:.+]] = add i64 %index, 0
-; UNROLL-NO-IC: %[[i1:.+]] = add i64 %index, 1
 ; UNROLL-NO-IC: %[[i2:.+]] = add i64 %index, 2
-; UNROLL-NO-IC: %[[i3:.+]] = add i64 %index, 3
 ; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i0]]
-; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i1]]
 ; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i2]]
-; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i3]]
 ;
 ; IND-LABEL: @scalarize_induction_variable_01(
 ; IND: vector.body:
@@ -611,9 +605,7 @@
 ; CHECK: %vec.ind = phi <2 x i32> [ %[[START]], %vector.ph ], [ %vec.ind.next, %vector.body ]
 ; CHECK: %offset.idx = add i32 %i, %index
 ; CHECK: %[[A1:.*]] = add i32 %offset.idx, 0
-; CHECK: %[[A2:.*]] = add i32 %offset.idx, 1
 ; CHECK: %[[G1:.*]] = getelementptr inbounds i32, i32* %a, i32 %[[A1]]
-; CHECK: %[[G2:.*]] = getelementptr inbounds i32, i32* %a, i32 %[[A2]]
 ; CHECK: %[[G3:.*]] = getelementptr i32, i32* %[[G1]], i32 0
 ; CHECK: %[[B1:.*]] = bitcast i32* %[[G3]] to <2 x i32>*
 ; CHECK: store <2 x i32> %vec.ind, <2 x i32>* %[[B1]]
Index: test/Transforms/LoopVectorize/induction_plus.ll
===================================================================
--- test/Transforms/LoopVectorize/induction_plus.ll
+++ test/Transforms/LoopVectorize/induction_plus.ll
@@ -9,7 +9,9 @@
 ;CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ;CHECK: %vec.ind = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
 ;CHECK: %vec.ind1 = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next2, %vector.body ]
-;CHECK: add nsw <4 x i64> %vec.ind, <i64 12, i64 12, i64 12, i64 12>
+;CHECK: %[[T1:.+]] = add i64 %index, 0
+;CHECK: %[[T2:.+]] = add nsw i64 %[[T1]], 12
+;CHECK: getelementptr inbounds [1024 x i32], [1024 x i32]* @array, i64 0, i64 %[[T2]]
 ;CHECK: %vec.ind.next = add <4 x i64> %vec.ind, <i64 4, i64 4, i64 4, i64 4>
 ;CHECK: %vec.ind.next2 = add <4 x i32> %vec.ind1, <i32 4, i32 4, i32 4, i32 4>
 ;CHECK: ret i32
Index: test/Transforms/LoopVectorize/reverse_induction.ll
===================================================================
--- test/Transforms/LoopVectorize/reverse_induction.ll
+++ test/Transforms/LoopVectorize/reverse_induction.ll
@@ -8,13 +8,7 @@
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i64 %startval, %index
 ; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
 
 define i32 @reverse_induction_i64(i64 %startval, i32 * %ptr) {
 entry:
@@ -40,13 +34,7 @@
 ; CHECK: %index = phi i128 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i128 %startval, %index
 ; CHECK: %[[a0:.+]] = add i128 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i128 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i128 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i128 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i128 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i128 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i128 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i128 %offset.idx, -7
 
 define i32 @reverse_induction_i128(i128 %startval, i32 * %ptr) {
 entry:
@@ -72,13 +60,7 @@
 ; CHECK: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i16 %startval, {{.*}}
 ; CHECK: %[[a0:.+]] = add i16 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i16 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i16 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i16 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i16 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i16 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i16 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i16 %offset.idx, -7
 
 define i32 @reverse_induction_i16(i16 %startval, i32 * %ptr) {
 entry:
@@ -121,13 +103,7 @@
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i64 1023, %index
 ; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
 
 define void @reverse_forward_induction_i64_i8() {
 entry:
@@ -153,13 +129,7 @@
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i64 1023, %index
 ; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
 
 define void @reverse_forward_induction_i64_i8_signed() {
 entry:
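
Reviewer note (not part of the patch): the heart of the change is the per-part lane trimming in buildScalarSteps() and scalarizeInstruction(). The following is a minimal standalone C++ sketch of that decision, not LLVM code; the isUniform flag stands in for Legal->isUniformAfterVectorization(), and the printed indices correspond to the StartIdx = VF * Part + Lane values the patch still emits.

  // Standalone illustration of the lane-trimming logic added above.
  // Assumes the value is uniform after vectorization, i.e. only the first
  // lane's scalar copy is actually used (for example, as the address of a
  // consecutive memory access that becomes a single vector load/store).
  #include <cstdio>

  int main() {
    const unsigned VF = 4;          // vectorization factor
    const unsigned UF = 2;          // unroll (interleave) factor
    const bool isUniform = true;    // stand-in for isUniformAfterVectorization

    // Mirror of the patch: generate one lane per unroll part when uniform,
    // otherwise all VF lanes.
    const unsigned Lanes = isUniform ? 1 : VF;

    for (unsigned Part = 0; Part < UF; ++Part)
      for (unsigned Lane = 0; Lane < Lanes; ++Lane)
        // Corresponds to StartIdx = VF * Part + Lane; the scalar step emitted
        // by the vectorizer is ScalarIV + StartIdx * Step.
        std::printf("part %u lane %u: step index %u\n", Part, Lane,
                    VF * Part + Lane);
    return 0;
  }

With VF = 4 and UF = 2 this prints only the indices 0 and 4, which is why the updated reverse_induction.ll checks above keep just the lane-zero step of each part (offsets 0 and -4 for the negative-step inductions).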