diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4440,9 +4440,9 @@
                                               VPValue *StartVPV, VPValue *Def,
                                               VPTransformState &State) {
   PHINode *P = cast<PHINode>(PN);
-  if (EnableVPlanNativePath) {
-    // Currently we enter here in the VPlan-native path for non-induction
-    // PHIs where all control flow is uniform. We simply widen these PHIs.
+  if (EnableVPlanNativePath && !OrigLoop->isInnermost()) {
+    // We enter here in the VPlan-native path when the loop is not the
+    // innermost loop. We handle non-induction PHIs here and simply widen them.
     // Create a vector phi with no operands - the vector phi operands will be
     // set at the end of vector code generation.
     Type *VecTy = (State.VF.isScalar())
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -599,7 +599,8 @@
   assert((EnableVPlanNativePath ||
           isa<UnreachableInst>(LastBB->getTerminator())) &&
          "Expected InnerLoop VPlan CFG to terminate with unreachable");
-  assert((!EnableVPlanNativePath || isa<BranchInst>(LastBB->getTerminator())) &&
+  assert((!EnableVPlanNativePath ||
+          (L->isInnermost() || isa<BranchInst>(LastBB->getTerminator()))) &&
          "Expected VPlan CFG to terminate with branch in NativePath");
   LastBB->getTerminator()->eraseFromParent();
   BranchInst::Create(VectorLatchBB, LastBB);
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-vectorize-inner-loop.ll b/llvm/test/Transforms/LoopVectorize/vplan-vectorize-inner-loop.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/vplan-vectorize-inner-loop.ll
@@ -0,0 +1,85 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -loop-vectorize -force-vector-width=4 -enable-vplan-native-path -S %s | FileCheck %s
+
+; Test that when the VPlan native path is enabled and no explicit loop is
+; marked to be vectorized, the innermost loop is vectorized without issues.
+
+define void @inner_loop_reduction(double* noalias nocapture readonly %a.in, double* noalias nocapture readonly %b.in, double* noalias nocapture %c.out) {
+; CHECK-LABEL: @inner_loop_reduction(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR1_HEADER:%.*]]
+; CHECK:       for1.header:
+; CHECK-NEXT:    [[INDVAR1:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR11:%.*]], [[FOR1_LATCH:%.*]] ]
+; CHECK-NEXT:    [[A_PTR:%.*]] = getelementptr inbounds double, double* [[A_IN:%.*]], i64 [[INDVAR1]]
+; CHECK-NEXT:    [[A:%.*]] = load double, double* [[A_PTR]], align 8
+; CHECK-NEXT:    [[B_PTR:%.*]] = getelementptr inbounds double, double* [[B_IN:%.*]], i64 [[INDVAR1]]
+; CHECK-NEXT:    [[B:%.*]] = load double, double* [[B_PTR]], align 8
+; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x double> zeroinitializer, double [[A]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x double> poison, double [[B]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x double> [[BROADCAST_SPLATINSERT]], <4 x double> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x double> [ [[TMP0]], [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP1]] = fadd <4 x double> [[BROADCAST_SPLAT]], [[VEC_PHI]]
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 10000
+; CHECK-NEXT:    br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP0:!llvm.loop !.*]]
+; CHECK:       middle.block:
+; CHECK-NEXT:    [[TMP3:%.*]] = call double @llvm.vector.reduce.fadd.v4f64(double -0.000000e+00, <4 x double> [[TMP1]])
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 10000, 10000
+; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR1_LATCH]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ 10000, [[MIDDLE_BLOCK]] ], [ 0, [[FOR1_HEADER]] ]
+; CHECK-NEXT:    [[BC_MERGE_RDX:%.*]] = phi double [ [[A]], [[FOR1_HEADER]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    br label [[FOR2_HEADER:%.*]]
+; CHECK:       for2.header:
+; CHECK-NEXT:    [[INDVAR2:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVAR21:%.*]], [[FOR2_HEADER]] ]
+; CHECK-NEXT:    [[A_REDUCTION:%.*]] = phi double [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[A_REDUCTION1:%.*]], [[FOR2_HEADER]] ]
+; CHECK-NEXT:    [[A_REDUCTION1]] = fadd double [[B]], [[A_REDUCTION]]
+; CHECK-NEXT:    [[INDVAR21]] = add nuw nsw i32 [[INDVAR2]], 1
+; CHECK-NEXT:    [[FOR2_COND:%.*]] = icmp eq i32 [[INDVAR21]], 10000
+; CHECK-NEXT:    br i1 [[FOR2_COND]], label [[FOR1_LATCH]], label [[FOR2_HEADER]], [[LOOP2:!llvm.loop !.*]]
+; CHECK:       for1.latch:
+; CHECK-NEXT:    [[A_REDUCTION1_LCSSA:%.*]] = phi double [ [[A_REDUCTION1]], [[FOR2_HEADER]] ], [ [[TMP3]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    [[C_PTR:%.*]] = getelementptr inbounds double, double* [[C_OUT:%.*]], i64 [[INDVAR1]]
+; CHECK-NEXT:    store double [[A_REDUCTION1_LCSSA]], double* [[C_PTR]], align 8
+; CHECK-NEXT:    [[INDVAR11]] = add nuw nsw i64 [[INDVAR1]], 1
+; CHECK-NEXT:    [[FOR1_COND:%.*]] = icmp eq i64 [[INDVAR11]], 1000
+; CHECK-NEXT:    br i1 [[FOR1_COND]], label [[EXIT:%.*]], label [[FOR1_HEADER]]
+; CHECK:       exit:
+; CHECK-NEXT:    ret void
+
+entry:
+  br label %for1.header
+
+for1.header:                                      ; preds = %entry
+  %indvar1 = phi i64 [ 0, %entry ], [ %indvar11, %for1.latch ]
+  %a.ptr = getelementptr inbounds double, double* %a.in, i64 %indvar1
+  %a = load double, double* %a.ptr, align 8
+  %b.ptr = getelementptr inbounds double, double* %b.in, i64 %indvar1
+  %b = load double, double* %b.ptr, align 8
+  br label %for2.header
+
+for2.header:                                      ; preds = %for1.header, %for2.header
+  %indvar2 = phi i32 [ 0, %for1.header ], [ %indvar21, %for2.header ]
+  %a.reduction = phi double [ %a, %for1.header ], [ %a.reduction1, %for2.header ]
+  %a.reduction1 = fadd double %b, %a.reduction
+  %indvar21 = add nuw nsw i32 %indvar2, 1
+  %for2.cond = icmp eq i32 %indvar21, 10000
+  br i1 %for2.cond, label %for1.latch, label %for2.header
+
+for1.latch:                                       ; preds = %for2.header
+  %c.ptr = getelementptr inbounds double, double* %c.out, i64 %indvar1
+  store double %a.reduction1, double* %c.ptr, align 8
+  %indvar11 = add nuw nsw i64 %indvar1, 1
+  %for1.cond = icmp eq i64 %indvar11, 1000
+  br i1 %for1.cond, label %exit, label %for1.header
+
+exit:                                             ; preds = %for1.latch
+  ret void
+}