Index: lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- lib/Transforms/Vectorize/LoopVectorize.cpp
+++ lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2260,11 +2260,27 @@
   assert(ScalarIVTy->isIntegerTy() && ScalarIVTy == Step->getType() &&
          "Val and Step should have the same integer type");
 
+  auto scalarUserIsUniform = [&](User *U) -> bool {
+    auto *I = cast<Instruction>(U);
+    return !OrigLoop->contains(I) || !Legal->isScalarAfterVectorization(I) ||
+           Legal->isUniformAfterVectorization(I);
+  };
+
+  // Determine the number of scalars we need to generate for each unroll
+  // iteration. If EntryVal is uniform or all its scalar users are uniform, we
+  // only need to generate the first lane. Otherwise, we generate all VF
+  // values. We are essentially determining if the induction variable has no
+  // "multi-scalar" (non-uniform scalar) users.
+  unsigned Lanes = VF;
+  if (Legal->isUniformAfterVectorization(cast<Instruction>(EntryVal)) ||
+      all_of(EntryVal->users(), scalarUserIsUniform))
+    Lanes = 1;
+
   // Compute the scalar steps and save the results in VectorLoopValueMap.
   ScalarParts Entry(UF);
   for (unsigned Part = 0; Part < UF; ++Part) {
     Entry[Part].resize(VF);
-    for (unsigned Lane = 0; Lane < VF; ++Lane) {
+    for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
       auto *StartIdx = ConstantInt::get(ScalarIVTy, VF * Part + Lane);
       auto *Mul = Builder.CreateMul(StartIdx, Step);
       auto *Add = Builder.CreateAdd(ScalarIV, Mul);
@@ -2385,6 +2401,9 @@
   // Initialize a new vector map entry.
   VectorParts Entry(UF);
 
+  // If we've scalarized a value, that value should be an instruction.
+  auto *I = cast<Instruction>(V);
+
   // If we aren't vectorizing, we can just copy the scalar map values over to
   // the vector map.
   if (VF == 1) {
@@ -2395,7 +2414,8 @@
 
   // Get the last scalarized instruction. This corresponds to the instruction
   // we created for the last vector lane on the last unroll iteration.
-  auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, VF - 1));
+  auto LastLane = Legal->isUniformAfterVectorization(I) ? 0 : VF - 1;
+  auto *LastInst = cast<Instruction>(getScalarValue(V, UF - 1, LastLane));
 
   // Set the insert point after the last scalarized instruction. This ensures
   // the insertelement sequence will directly follow the scalar definitions.
@@ -2408,9 +2428,11 @@
   // in VectorLoopValueMap, we will only generate the insertelements once.
   for (unsigned Part = 0; Part < UF; ++Part) {
     Value *Insert = UndefValue::get(VectorType::get(V->getType(), VF));
-    for (unsigned Width = 0; Width < VF; ++Width)
+    for (unsigned Width = 0; Width < VF; ++Width) {
+      auto Lane = Legal->isUniformAfterVectorization(I) ? 0 : Width;
       Insert = Builder.CreateInsertElement(
-          Insert, getScalarValue(V, Part, Width), Builder.getInt32(Width));
+          Insert, getScalarValue(V, Part, Lane), Builder.getInt32(Width));
+    }
     Entry[Part] = Insert;
   }
   Builder.restoreIP(OldIP);
@@ -2431,6 +2453,10 @@
   if (OrigLoop->isLoopInvariant(V))
     return V;
 
+  // If the value is uniform after vectorization, use the first lane.
+  if (Legal->isUniformAfterVectorization(cast<Instruction>(V)))
+    Lane = 0;
+
   // If the value from the original loop has not been vectorized, it is
   // represented by UF x VF scalar values in the new loop. Return the requested
   // scalar value.
@@ -2959,11 +2985,16 @@
   if (IfPredicateInstr)
     Cond = createBlockInMask(Instr->getParent());
 
+  // Determine the number of scalars we need to generate for each unroll
+  // iteration. If the instruction is uniform, we only need to generate the
+  // first lane. Otherwise, we generate all VF values.
+  unsigned Lanes = Legal->isUniformAfterVectorization(Instr) ? 1 : VF;
+
   // For each vector unroll 'part':
   for (unsigned Part = 0; Part < UF; ++Part) {
     Entry[Part].resize(VF);
     // For each scalar that we create:
-    for (unsigned Width = 0; Width < VF; ++Width) {
+    for (unsigned Width = 0; Width < Lanes; ++Width) {
 
       // Start if-block.
       Value *Cmp = nullptr;
@@ -4473,12 +4504,16 @@
     // This is the normalized GEP that starts counting at zero.
     Value *PtrInd = Induction;
     PtrInd = Builder.CreateSExtOrTrunc(PtrInd, II.getStep()->getType());
+    // Determine the number of scalars we need to generate for each unroll
+    // iteration. If the instruction is uniform, we only need to generate the
+    // first lane. Otherwise, we generate all VF values.
+    unsigned Lanes = Legal->isUniformAfterVectorization(P) ? 1 : VF;
     // These are the scalar results. Notice that we don't generate vector GEPs
    // because scalar GEPs result in better code.
     ScalarParts Entry(UF);
     for (unsigned Part = 0; Part < UF; ++Part) {
       Entry[Part].resize(VF);
-      for (unsigned Lane = 0; Lane < VF; ++Lane) {
+      for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
         Constant *Idx = ConstantInt::get(PtrInd->getType(), Lane + Part * VF);
         Value *GlobalIdx = Builder.CreateAdd(PtrInd, Idx);
         Value *SclrGep = II.transform(Builder, GlobalIdx, PSE.getSE(), DL);
Index: test/Transforms/LoopVectorize/induction.ll
===================================================================
--- test/Transforms/LoopVectorize/induction.ll
+++ test/Transforms/LoopVectorize/induction.ll
@@ -78,21 +78,15 @@
 ; CHECK: vector.body:
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %[[i0:.+]] = add i64 %index, 0
-; CHECK: %[[i1:.+]] = add i64 %index, 1
 ; CHECK: getelementptr inbounds i64, i64* %a, i64 %[[i0]]
-; CHECK: getelementptr inbounds i64, i64* %a, i64 %[[i1]]
 ;
 ; UNROLL-NO-IC-LABEL: @scalarize_induction_variable_01(
 ; UNROLL-NO-IC: vector.body:
 ; UNROLL-NO-IC: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; UNROLL-NO-IC: %[[i0:.+]] = add i64 %index, 0
-; UNROLL-NO-IC: %[[i1:.+]] = add i64 %index, 1
 ; UNROLL-NO-IC: %[[i2:.+]] = add i64 %index, 2
-; UNROLL-NO-IC: %[[i3:.+]] = add i64 %index, 3
 ; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i0]]
-; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i1]]
 ; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i2]]
-; UNROLL-NO-IC: getelementptr inbounds i64, i64* %a, i64 %[[i3]]
 ;
 ; IND-LABEL: @scalarize_induction_variable_01(
 ; IND: vector.body:
@@ -611,9 +605,7 @@
 ; CHECK: %vec.ind = phi <2 x i32> [ %[[START]], %vector.ph ], [ %vec.ind.next, %vector.body ]
 ; CHECK: %offset.idx = add i32 %i, %index
 ; CHECK: %[[A1:.*]] = add i32 %offset.idx, 0
-; CHECK: %[[A2:.*]] = add i32 %offset.idx, 1
 ; CHECK: %[[G1:.*]] = getelementptr inbounds i32, i32* %a, i32 %[[A1]]
-; CHECK: %[[G2:.*]] = getelementptr inbounds i32, i32* %a, i32 %[[A2]]
 ; CHECK: %[[G3:.*]] = getelementptr i32, i32* %[[G1]], i32 0
 ; CHECK: %[[B1:.*]] = bitcast i32* %[[G3]] to <2 x i32>*
 ; CHECK: store <2 x i32> %vec.ind, <2 x i32>* %[[B1]]
Index: test/Transforms/LoopVectorize/reverse_induction.ll
===================================================================
--- test/Transforms/LoopVectorize/reverse_induction.ll
+++ test/Transforms/LoopVectorize/reverse_induction.ll
@@ -8,13 +8,7 @@
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i64 %startval, %index
 ; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
 
 define i32 @reverse_induction_i64(i64 %startval, i32 * %ptr) {
 entry:
@@ -40,13 +34,7 @@
 ; CHECK: %index = phi i128 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i128 %startval, %index
 ; CHECK: %[[a0:.+]] = add i128 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i128 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i128 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i128 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i128 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i128 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i128 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i128 %offset.idx, -7
 
 define i32 @reverse_induction_i128(i128 %startval, i32 * %ptr) {
 entry:
@@ -72,13 +60,7 @@
 ; CHECK: %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i16 %startval, {{.*}}
 ; CHECK: %[[a0:.+]] = add i16 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i16 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i16 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i16 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i16 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i16 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i16 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i16 %offset.idx, -7
 
 define i32 @reverse_induction_i16(i16 %startval, i32 * %ptr) {
 entry:
@@ -121,13 +103,7 @@
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i64 1023, %index
 ; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
 
 define void @reverse_forward_induction_i64_i8() {
 entry:
@@ -153,13 +129,7 @@
 ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
 ; CHECK: %offset.idx = sub i64 1023, %index
 ; CHECK: %[[a0:.+]] = add i64 %offset.idx, 0
-; CHECK: %[[a1:.+]] = add i64 %offset.idx, -1
-; CHECK: %[[a2:.+]] = add i64 %offset.idx, -2
-; CHECK: %[[a3:.+]] = add i64 %offset.idx, -3
 ; CHECK: %[[a4:.+]] = add i64 %offset.idx, -4
-; CHECK: %[[a5:.+]] = add i64 %offset.idx, -5
-; CHECK: %[[a6:.+]] = add i64 %offset.idx, -6
-; CHECK: %[[a7:.+]] = add i64 %offset.idx, -7
 
 define void @reverse_forward_induction_i64_i8_signed() {
 entry:
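For reviewers who want the gist without tracing the value-map plumbing, here is a minimal standalone sketch of the lane-reduction scheme the patch implements. This is plain C++, not LLVM code: ScalarMap, its Entry member, and this getScalarValue are hypothetical stand-ins for VectorLoopValueMap, ScalarParts, getScalarValue(), and Legal->isUniformAfterVectorization(); VF and UF are just constants modeling the vectorization and unroll factors.

```cpp
#include <iostream>
#include <string>
#include <vector>

static constexpr unsigned UF = 2; // unroll factor modeled in this sketch
static constexpr unsigned VF = 4; // vectorization factor modeled in this sketch

// Hypothetical stand-in for the scalar side of VectorLoopValueMap:
// Entry[Part][Lane] holds the scalar created for an unroll part / vector lane.
struct ScalarMap {
  std::vector<std::vector<std::string>> Entry;
  bool Uniform;

  // Mirrors the patch's Lanes computation: a uniform value gets one scalar
  // per unroll part (lane 0 only); a non-uniform value gets all VF lanes.
  ScalarMap(const std::string &Name, bool IsUniform) : Uniform(IsUniform) {
    unsigned Lanes = Uniform ? 1 : VF;
    Entry.resize(UF);
    for (unsigned Part = 0; Part < UF; ++Part) {
      Entry[Part].resize(VF);
      for (unsigned Lane = 0; Lane < Lanes; ++Lane)
        Entry[Part][Lane] = Name + ".part" + std::to_string(Part) + ".lane" +
                            std::to_string(Lane);
    }
  }

  // Mirrors the getScalarValue change: a uniform value answers every lane
  // query with lane 0, so the lanes left unfilled above are never read.
  const std::string &getScalarValue(unsigned Part, unsigned Lane) const {
    if (Uniform)
      Lane = 0;
    return Entry[Part][Lane];
  }
};

int main() {
  ScalarMap Ind("ind", /*IsUniform=*/false); // e.g. a "multi-scalar" induction
  ScalarMap Gep("gep", /*IsUniform=*/true);  // e.g. a GEP feeding a wide load
  for (unsigned Part = 0; Part < UF; ++Part)
    for (unsigned Lane = 0; Lane < VF; ++Lane)
      std::cout << Ind.getScalarValue(Part, Lane) << "  |  "
                << Gep.getScalarValue(Part, Lane) << "\n";
}
```

This is also why the CHECK updates above keep only the first lane of each unroll part: in the reverse-induction tests (VF = 4, UF = 2), the surviving offsets are 0 (part 0, lane 0) and -4 (part 1, lane 0), and the per-lane adds for the remaining lanes are never emitted because the instructions that consume them are uniform after vectorization.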