diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2614,15 +2614,14 @@
  // Determine the number of scalars we need to generate for each unroll
  // iteration. If EntryVal is uniform, we only need to generate the first
  // lane. Otherwise, we generate all VF values.
-  bool IsUniform =
-      Cost->isUniformAfterVectorization(cast<Instruction>(EntryVal), State.VF);
-  unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue();
+  bool FirstLaneOnly = vputils::onlyFirstLaneDemanded(Def);
+  unsigned Lanes = FirstLaneOnly ? 1 : State.VF.getKnownMinValue();
  // Compute the scalar steps and save the results in State.
  Type *IntStepTy = IntegerType::get(ScalarIVTy->getContext(),
                                     ScalarIVTy->getScalarSizeInBits());
  Type *VecIVTy = nullptr;
  Value *UnitStepVec = nullptr, *SplatStep = nullptr, *SplatIV = nullptr;
-  if (!IsUniform && State.VF.isScalable()) {
+  if (!FirstLaneOnly && State.VF.isScalable()) {
    VecIVTy = VectorType::get(ScalarIVTy, State.VF);
    UnitStepVec =
        Builder.CreateStepVector(VectorType::get(IntStepTy, State.VF));
@@ -2633,7 +2632,7 @@
  for (unsigned Part = 0; Part < State.UF; ++Part) {
    Value *StartIdx0 = createStepForVF(Builder, IntStepTy, State.VF, Part);

-    if (!IsUniform && State.VF.isScalable()) {
+    if (!FirstLaneOnly && State.VF.isScalable()) {
      auto *SplatStartIdx = Builder.CreateVectorSplat(State.VF, StartIdx0);
      auto *InitVec = Builder.CreateAdd(SplatStartIdx, UnitStepVec);
      if (ScalarIVTy->isFloatingPointTy())
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2745,6 +2745,13 @@
  bool isCompletelySLP() const { return CompletelySLP; }
};

+namespace vputils {
+
+/// Returns true if only the first lane of \p Def is used.
+bool onlyFirstLaneDemanded(VPValue *Def);
+
+} // end namespace vputils
+
} // end namespace llvm

#endif // LLVM_TRANSFORMS_VECTORIZE_VPLAN_H
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -1655,3 +1655,24 @@
  for (VPValue *Def : Recipe.definedValues())
    assignSlot(Def);
}
+
+bool vputils::onlyFirstLaneDemanded(VPValue *Def) {
+  return all_of(Def->users(), [&Def](VPUser *U) {
+    auto *R = dyn_cast<VPRecipeBase>(U);
+    if (auto *RepR = dyn_cast<VPReplicateRecipe>(R))
+      return RepR->isUniform();
+    if (auto *MemR = dyn_cast<VPWidenMemoryInstructionRecipe>(R)) {
+      // Widened, consecutive memory operations only demand the first lane,
+      // unless Def is stored.
+      return MemR->isConsecutive() && MemR->getAddr() == Def &&
+             (!MemR->isStore() || MemR->getStoredValue() != Def);
+    }
+    if (auto *VPI = dyn_cast<VPInstruction>(R))
+      return VPI->getOpcode() == VPInstruction::ActiveLaneMask;
+
+    if (auto *B = dyn_cast(R))
+      return onlyFirstLaneDemanded(B);
+
+    return false;
+  });
+}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll b/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll
--- a/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/conversion-cost.ll
@@ -25,46 +25,15 @@
; CHECK-NEXT: [[VEC_IND1:%.*]] = phi <32 x i8> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX]]
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 2
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 3
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 4
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 5
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 6
-; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 7
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 8
-; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], 9
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 10
-; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[OFFSET_IDX]], 11
-; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[OFFSET_IDX]], 12
-; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[OFFSET_IDX]], 13
-; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[OFFSET_IDX]], 14
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[OFFSET_IDX]], 15
-; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[OFFSET_IDX]], 16
-; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[OFFSET_IDX]], 17
-; CHECK-NEXT: [[TMP23:%.*]] = add i64 [[OFFSET_IDX]], 18
-; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[OFFSET_IDX]], 19
-; CHECK-NEXT: [[TMP25:%.*]] = add i64 [[OFFSET_IDX]], 20
-; CHECK-NEXT: [[TMP26:%.*]] = add i64 [[OFFSET_IDX]], 21
-; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[OFFSET_IDX]], 22
-; CHECK-NEXT: [[TMP28:%.*]] = add i64 [[OFFSET_IDX]], 23
-; CHECK-NEXT: [[TMP29:%.*]] = add i64 [[OFFSET_IDX]], 24
-; CHECK-NEXT: [[TMP30:%.*]] = add i64 [[OFFSET_IDX]], 25
-; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[OFFSET_IDX]], 26
-; CHECK-NEXT: [[TMP32:%.*]] = add i64 [[OFFSET_IDX]], 27
-; CHECK-NEXT: [[TMP33:%.*]] = add i64 [[OFFSET_IDX]], 28
-; CHECK-NEXT: [[TMP34:%.*]] = add i64 [[OFFSET_IDX]], 29
-; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[OFFSET_IDX]], 30
-; CHECK-NEXT: [[TMP36:%.*]] = add i64 [[OFFSET_IDX]], 31
-; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 [[TMP5]]
-; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, i8* [[TMP37]], i32 0
-; CHECK-NEXT: [[TMP39:%.*]] = bitcast i8* [[TMP38]] to <32 x i8>*
-; CHECK-NEXT: store <32 x i8> [[VEC_IND1]], <32 x i8>* [[TMP39]], align 1
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, i8* [[TMP6]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to <32 x i8>*
+; CHECK-NEXT: store <32 x i8> [[VEC_IND1]], <32 x i8>* [[TMP8]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <32 x i64> [[VEC_IND]],
; CHECK-NEXT: [[VEC_IND_NEXT2]] = add <32 x i8> [[VEC_IND1]],
-; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP40]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[SCALAR_PH]]
@@ -73,9 +42,9 @@
; CHECK-NEXT: br label [[DOTLR_PH:%.*]]
; CHECK: .lr.ph:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[TMP41:%.*]] = trunc i64 [[INDVARS_IV]] to i8
-; CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 [[INDVARS_IV]]
-; CHECK-NEXT: store i8 [[TMP41]], i8* [[TMP42]], align 1
+; CHECK-NEXT: [[TMP10:%.*]] = trunc i64 [[INDVARS_IV]] to i8
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT: store i8 [[TMP10]], i8* [[TMP11]], align 1
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
--- a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
@@ -32,7 +32,7 @@
; CHECK-NEXT: call void @llvm.masked.store.v64i8.p0v64i8(<64 x i8> [[TMP6]], <64 x i8>* [[TMP7]], i32 1, <64 x i1> [[TMP1]])
; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 64
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 256
-; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP0:!llvm.loop !.*]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
@@ -73,7 +73,7 @@
; AUTOVF-NEXT: call void @llvm.masked.store.v32i8.p0v32i8(<32 x i8> [[TMP6]], <32 x i8>* [[TMP7]], i32 1, <32 x i1> [[TMP1]])
; AUTOVF-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 32
; AUTOVF-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 224
-; AUTOVF-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; AUTOVF-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP0:!llvm.loop !.*]]
; AUTOVF: middle.block:
; AUTOVF-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; AUTOVF: scalar.ph:
@@ -177,7 +177,7 @@
; AUTOVF-NEXT: call void @llvm.masked.store.v32i8.p0v32i8(<32 x i8> [[TMP6]], <32 x i8>* [[TMP7]], i32 1, <32 x i1> [[TMP1]])
; AUTOVF-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 32
; AUTOVF-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], 224
-; AUTOVF-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; AUTOVF-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP4:!llvm.loop !.*]]
; AUTOVF: middle.block:
; AUTOVF-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; AUTOVF: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr36524.ll b/llvm/test/Transforms/LoopVectorize/X86/pr36524.ll
--- a/llvm/test/Transforms/LoopVectorize/X86/pr36524.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr36524.ll
@@ -9,9 +9,6 @@
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 2, [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 0
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 1
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 2
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 3
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]],
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 80
diff --git a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
--- a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
@@ -268,61 +268,49 @@
; CHECK-NEXT: [[STEP_ADD1:%.*]] = add <4 x i64> [[STEP_ADD]],
; CHECK-NEXT: [[STEP_ADD2:%.*]] = add <4 x i64> [[STEP_ADD1]],
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 5
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 6
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 7
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 8
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 9
-; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 10
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 11
-; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 12
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 13
-; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 14
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 15
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12
; CHECK-NEXT: [[STEP_ADD5:%.*]] = add <4 x i32> [[VEC_IND4]],
; CHECK-NEXT: [[STEP_ADD6:%.*]] = add <4 x i32> [[STEP_ADD5]],
; CHECK-NEXT: [[STEP_ADD7:%.*]] = add <4 x i32> [[STEP_ADD6]],
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 0
-; CHECK-NEXT: store i32 [[TMP16]], i32* [[ADDR:%.*]], align 4
-; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 1
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 0
+; CHECK-NEXT: store i32 [[TMP4]], i32* [[ADDR:%.*]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 1
+; CHECK-NEXT: store i32 [[TMP5]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 2
+; CHECK-NEXT: store i32 [[TMP6]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 3
+; CHECK-NEXT: store i32 [[TMP7]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 0
+; CHECK-NEXT: store i32 [[TMP8]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 1
+; CHECK-NEXT: store i32 [[TMP9]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 2
+; CHECK-NEXT: store i32 [[TMP10]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 3
+; CHECK-NEXT: store i32 [[TMP11]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 0
+; CHECK-NEXT: store i32 [[TMP12]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 1
+; CHECK-NEXT: store i32 [[TMP13]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 2
+; CHECK-NEXT: store i32 [[TMP14]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 3
+; CHECK-NEXT: store i32 [[TMP15]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 0
+; CHECK-NEXT: store i32 [[TMP16]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 1
; CHECK-NEXT: store i32 [[TMP17]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 2
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 2
; CHECK-NEXT: store i32 [[TMP18]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 3
+; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 3
; CHECK-NEXT: store i32 [[TMP19]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 0
-; CHECK-NEXT: store i32 [[TMP20]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 1
-; CHECK-NEXT: store i32 [[TMP21]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 2
-; CHECK-NEXT: store i32 [[TMP22]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 3
-; CHECK-NEXT: store i32 [[TMP23]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 0
-; CHECK-NEXT: store i32 [[TMP24]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP25:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 1
-; CHECK-NEXT: store i32 [[TMP25]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 2
-; CHECK-NEXT: store i32 [[TMP26]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 3
-; CHECK-NEXT: store i32 [[TMP27]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP28:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 0
-; CHECK-NEXT: store i32 [[TMP28]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP29:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 1
-; CHECK-NEXT: store i32 [[TMP29]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP30:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 2
-; CHECK-NEXT: store i32 [[TMP30]], i32* [[ADDR]], align 4
-; CHECK-NEXT: [[TMP31:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 3
-; CHECK-NEXT: store i32 [[TMP31]], i32* [[ADDR]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[STEP_ADD2]],
; CHECK-NEXT: [[VEC_IND_NEXT9]] = add <4 x i32> [[STEP_ADD7]],
-; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
-; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4097, 4096
; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-complex.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-complex.ll
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-complex.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-complex.ll
@@ -488,21 +488,18 @@
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ , [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP7:%.*]] = sext i32 [[TMP3]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP7]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, i16* [[TMP8]], i32 0
-; CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[TMP9]] to <4 x i16>*
-; CHECK-NEXT: [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP10]], align 2
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32>
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast i16* [[TMP9]] to <4 x i16>*
-; CHECK-NEXT: store <4 x i16> [[TMP11]], <4 x i16>* [[TMP12]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP5]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
+; CHECK-NEXT: [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP7]], align 2
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
+; CHECK-NEXT: store <4 x i16> [[TMP8]], <4 x i16>* [[TMP9]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]],
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 3
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 2
@@ -572,21 +569,18 @@
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i16> [ , [[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP7:%.*]] = sext i32 [[TMP3]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP7]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, i16* [[TMP8]], i32 0
-; CHECK-NEXT: [[TMP10:%.*]] = bitcast i16* [[TMP9]] to <4 x i16>*
-; CHECK-NEXT: [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP10]], align 2
-; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32>
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast i16* [[TMP9]] to <4 x i16>*
-; CHECK-NEXT: store <4 x i16> [[TMP11]], <4 x i16>* [[TMP12]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[TMP5]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
+; CHECK-NEXT: [[WIDE_LOAD]] = load <4 x i16>, <4 x i16>* [[TMP7]], align 2
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i16> [[VECTOR_RECUR]], <4 x i16> [[WIDE_LOAD]], <4 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast i16* [[TMP6]] to <4 x i16>*
+; CHECK-NEXT: store <4 x i16> [[TMP8]], <4 x i16>* [[TMP9]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]],
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 3
; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <4 x i16> [[WIDE_LOAD]], i32 2
diff --git a/llvm/test/Transforms/LoopVectorize/loop-form.ll b/llvm/test/Transforms/LoopVectorize/loop-form.ll
--- a/llvm/test/Transforms/LoopVectorize/loop-form.ll
+++ b/llvm/test/Transforms/LoopVectorize/loop-form.ll
@@ -132,7 +132,6 @@
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, i16* [[TMP6]], i32 0
@@ -269,7 +268,6 @@
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, i16* [[TMP6]], i32 0
@@ -354,7 +352,6 @@
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, i16* [[TMP6]], i32 0
@@ -441,7 +438,6 @@
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, i16* [[TMP6]], i32 0
@@ -529,7 +525,6 @@
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, i16* [[TMP6]], i32 0
@@ -621,7 +616,6 @@
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
; CHECK-NEXT: [[TMP5:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, i16* [[P:%.*]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i16, i16* [[TMP6]], i32 0
@@ -1154,7 +1148,6 @@
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ , [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, i32* [[ADDR:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i32, i32* [[TMP2]], i32 0
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP3]] to <2 x i32>*