diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -441,7 +441,9 @@
     IRBuilder<> Builder(State->CFG.PrevBB->getTerminator());
     auto *TCMO = Builder.CreateSub(TC, ConstantInt::get(TC->getType(), 1),
                                    "trip.count.minus.1");
-    Value *VTCMO = Builder.CreateVectorSplat(State->VF, TCMO, "broadcast");
+    auto VF = State->VF;
+    Value *VTCMO =
+        VF == 1 ? TCMO : Builder.CreateVectorSplat(VF, TCMO, "broadcast");
     for (unsigned Part = 0, UF = State->UF; Part < UF; ++Part)
       State->set(BackedgeTakenCount, VTCMO, Part);
   }
@@ -809,12 +811,17 @@
   Value *CanonicalIV = State.CanonicalIV;
   Type *STy = CanonicalIV->getType();
   IRBuilder<> Builder(State.CFG.PrevBB->getTerminator());
-  Value *VStart = Builder.CreateVectorSplat(State.VF, CanonicalIV, "broadcast");
+  auto VF = State.VF;
+  Value *VStart = VF == 1
+                      ? CanonicalIV
+                      : Builder.CreateVectorSplat(VF, CanonicalIV, "broadcast");
   for (unsigned Part = 0, UF = State.UF; Part < UF; ++Part) {
     SmallVector<Constant *, 8> Indices;
-    for (unsigned Lane = 0, VF = State.VF; Lane < VF; ++Lane)
+    for (unsigned Lane = 0; Lane < VF; ++Lane)
       Indices.push_back(ConstantInt::get(STy, Part * VF + Lane));
-    Constant *VStep = ConstantVector::get(Indices);
+    // If VF == 1, the loop above runs for a single iteration, so the one
+    // element pushed into Indices is ConstantInt::get(STy, Part).
+    Constant *VStep = VF == 1 ? Indices.back() : ConstantVector::get(Indices);
     // Add the consecutive indices to the vector value.
     Value *CanonicalVectorIV = Builder.CreateAdd(VStart, VStep, "vec.iv");
     State.set(getVPValue(), CanonicalVectorIV, Part);
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -loop-vectorize -force-vector-interleave=4 -pass-remarks='loop-vectorize' -disable-output -S 2>&1 | FileCheck %s --check-prefix=CHECK-REMARKS
+; RUN: opt < %s -loop-vectorize -force-vector-interleave=4 -S | FileCheck %s
+
+; These tests check that the fold-tail procedure produces correct scalar code
+; when the loop vectorizer only unrolls (interleaves) the loop and does not
+; vectorize it, i.e. when VF = 1.
+
+; CHECK-REMARKS: remark: {{.*}} interleaved loop (interleaved count: 4)
+; CHECK-REMARKS-NEXT: remark: {{.*}} interleaved loop (interleaved count: 4)
+; CHECK-REMARKS-NOT: remark: {{.*}} vectorized loop
+
+define void @vectorize-factor-1-scalar-bound() {
+; CHECK-LABEL: @vectorize-factor-1-scalar-bound(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[INDUCTION:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[INDUCTION1:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT:    [[INDUCTION2:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT:    [[INDUCTION3:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT:    [[TMP0:%.*]] = icmp ule i64 [[INDUCTION]], 14
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule i64 [[INDUCTION1]], 14
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ule i64 [[INDUCTION2]], 14
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp ule i64 [[INDUCTION3]], 14
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], {{[0-9]*}}
+; CHECK-NEXT:    br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
+; CHECK:       middle.block:
+; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
+;
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, 15
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+define void @vectorize-factor-1-vector-bound(double* %pt1) !prof !12 {
+; CHECK-LABEL: @vectorize-factor-1-vector-bound(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr double, double* [[PT1:%.*]], i64 {{[0-9]*}}
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr double, double* [[PT1]], i64 [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT:    [[NEXT_GEP1:%.*]] = getelementptr double, double* [[PT1]], i64 [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT:    [[NEXT_GEP2:%.*]] = getelementptr double, double* [[PT1]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT:    [[NEXT_GEP3:%.*]] = getelementptr double, double* [[PT1]], i64 [[TMP3]]
+; CHECK-NEXT:    [[VEC_IV:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT:    [[VEC_IV4:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT:    [[VEC_IV5:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT:    [[VEC_IV6:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp ule i64 [[VEC_IV]], {{[0-9]*}}
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ule i64 [[VEC_IV4]], {{[0-9]*}}
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ule i64 [[VEC_IV5]], {{[0-9]*}}
+; CHECK-NEXT:    [[TMP7:%.*]] = icmp ule i64 [[VEC_IV6]], {{[0-9]*}}
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], {{[0-9]*}}
+; CHECK-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]]
+; CHECK:       middle.block:
+; CHECK-NEXT:    br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi double* [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PT1]], [[ENTRY:%.*]] ]
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+; CHECK:       for.body:
+; CHECK-NEXT:    [[ADDR:%.*]] = phi double* [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT:    [[PTR]] = getelementptr inbounds double, double* [[ADDR]], i64 1
+; CHECK-NEXT:    [[COND:%.*]] = icmp eq double* [[PTR]], [[PT1]]
+; CHECK-NEXT:    br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]]
+;
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %addr = phi double* [ %ptr, %for.body ], [ %pt1, %entry ]
+  %ptr = getelementptr inbounds double, double* %addr, i64 1
+  %cond = icmp eq double* %ptr, %pt1
+  br i1 %cond, label %for.cond.cleanup, label %for.body
+}
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 1, !"ProfileSummary", !1}
+!1 = !{!2, !3, !4, !5, !6, !7, !8, !9}
+!2 = !{!"ProfileFormat", !"InstrProf"}
+!3 = !{!"TotalCount", i64 10}
+!4 = !{!"MaxCount", i64 20}
+!5 = !{!"MaxInternalCount", i64 30}
+!6 = !{!"MaxFunctionCount", i64 40}
+!7 = !{!"NumCounts", i64 50}
+!8 = !{!"NumFunctions", i64 60}
+!9 = !{!"DetailedSummary", !10}
+!10 = !{!11}
+!11 = !{i32 999999, i64 70, i32 80}
+!12 = !{!"function_entry_count", i64 0}
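
Note for reviewers: the second hunk's VF == 1 special case can be modeled by a
minimal standalone sketch, given below. It is plain C++ with no LLVM headers;
the helper name widenCanonicalIV and the use of std::vector to stand in for a
ConstantVector are illustrative assumptions, not LLVM API. For VF > 1 each
unroll part adds the step vector <Part*VF + 0, ..., Part*VF + VF-1> to the
splatted canonical IV; for VF == 1 the lane loop runs once and the part's
"vector IV" degenerates to the scalar Index + Part, which is the pattern the
scalar INDUCTION/VEC_IV add lines in the autogenerated checks verify.

#include <cstdint>
#include <iostream>
#include <vector>

// Per-part widened canonical IV: Index broadcast to VF lanes plus the
// consecutive step <Part*VF + 0, ..., Part*VF + VF-1>. For VF == 1 the
// result holds the single scalar Index + Part (no vector is formed).
static std::vector<uint64_t> widenCanonicalIV(uint64_t Index, unsigned VF,
                                              unsigned Part) {
  std::vector<uint64_t> Lanes;
  for (unsigned Lane = 0; Lane < VF; ++Lane)
    Lanes.push_back(Index + Part * VF + Lane);
  return Lanes;
}

int main() {
  const unsigned VF = 1, UF = 4; // scalar VF with interleave count 4
  const uint64_t Index = 0;      // canonical IV entering this vector iteration
  for (unsigned Part = 0; Part < UF; ++Part)
    std::cout << "part " << Part << ": "
              << widenCanonicalIV(Index, VF, Part).front() << '\n';
  // Prints 0, 1, 2, 3: the per-part scalar IVs that the INDUCTION/VEC_IV
  // check lines above expect when the loop is interleaved but not vectorized.
  return 0;
}

Compiled and run, the sketch prints the four scalar IV values 0..3 for
Index = 0, i.e. the degenerate step ConstantInt::get(STy, Part) that the patch
selects instead of calling ConstantVector::get on a single-element vector.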