Index: llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
===================================================================
--- llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -195,9 +195,6 @@
   Instruction *getExactFPInst() { return ExactFPMathInst; }
 
-  bool canVectorizeFPMath(const LoopVectorizeHints &Hints) const {
-    return !ExactFPMathInst || Hints.allowReordering();
-  }
-
   unsigned getNumRuntimePointerChecks() const {
     return NumRuntimePointerChecks;
   }
@@ -255,6 +252,11 @@
   /// If false, good old LV code.
   bool canVectorize(bool UseVPlanNativePath);
 
+  /// Returns true if it is legal to vectorize the FP math operations in this
+  /// loop. Vectorizing is legal if we allow reordering of FP operations, or
+  /// if we can use in-order reductions.
+  bool canVectorizeFPMath(bool EnableStrictReductions);
+
   /// Return true if we can vectorize this loop while folding its tail by
   /// masking, and mark all respective loads/stores for masking.
   /// This object's state is only modified iff this function returns true.
Index: llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -857,6 +857,39 @@
   return true;
 }
 
+bool LoopVectorizationLegality::canVectorizeFPMath(
+    bool EnableStrictReductions) {
+
+  // First check if there is any exact FP math or if we allow reassociation.
+  if (!Requirements->getExactFPInst() || Hints->allowReordering())
+    return true;
+
+  // We have exact FP math and reordering is not allowed, so we can only
+  // vectorize if in-order (strict) reductions are enabled.
+  if (!EnableStrictReductions)
+    return false;
+
+  // Check for any exact FP induction variables, which we cannot vectorize
+  // without reordering.
+  if (any_of(getInductionVars(), [&](auto &Induction) -> bool {
+        InductionDescriptor IndDesc = Induction.second;
+        return IndDesc.getExactFPMathInst();
+      }))
+    return false;
+
+  // We can now only vectorize if all reductions with exact FP math also
+  // have the isOrdered flag set, which indicates that the reduction
+  // operations can be moved in-loop and performed in order.
+  return all_of(getReductionVars(), [&](auto &Reduction) -> bool {
+    const RecurrenceDescriptor &RdxDesc = Reduction.second;
+    return !RdxDesc.hasExactFPMath() || RdxDesc.isOrdered();
+  });
+}
+
 bool LoopVectorizationLegality::isInductionPhi(const Value *V) {
   Value *In0 = const_cast<Value *>(V);
   PHINode *PN = dyn_cast_or_null<PHINode>(In0);
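For reference, the decision sequence implemented by canVectorizeFPMath above can be restated as a small standalone predicate (a minimal C++ sketch for review purposes, not part of the patch; the boolean parameters stand in for the queries made on Requirements, Hints and the induction/reduction descriptors):

    // Sketch of the legality decision in canVectorizeFPMath (illustrative).
    static bool legalFPVectorization(bool HasExactFPInst, bool AllowReordering,
                                     bool EnableStrictReductions,
                                     bool HasExactFPInduction,
                                     bool AllExactFPRdxOrdered) {
      if (!HasExactFPInst || AllowReordering)
        return true;  // Nothing to preserve, or reordering is permitted.
      if (!EnableStrictReductions)
        return false; // In-order reductions are disabled.
      if (HasExactFPInduction)
        return false; // Exact FP inductions cannot be widened in order.
      // Every reduction with exact FP math must be performable in-loop.
      return AllExactFPRdxOrdered;
    }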
Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -9916,7 +9916,7 @@
     return false;
   }
 
-  if (!Requirements.canVectorizeFPMath(Hints)) {
+  if (!LVL.canVectorizeFPMath(EnableStrictReductions)) {
     ORE->emit([&]() {
       auto *ExactFPMathInst = Requirements.getExactFPInst();
       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
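The tests below exercise the in-order lowering: rather than keeping a wide vector accumulator that is reduced once after the loop, an ordered reduction keeps a scalar accumulator and calls llvm.vector.reduce.fadd inside the vector body. A scalar model of the intrinsic's strict semantics, for reference (an illustrative sketch, not code from this patch):

    // Strict llvm.vector.reduce.fadd(Start, Lanes): lanes are accumulated
    // in order, so the rounding of the original scalar loop is preserved.
    float strict_reduce_fadd(float Start, const float *Lanes, int VL) {
      float Acc = Start;
      for (int I = 0; I < VL; ++I)
        Acc = Acc + Lanes[I]; // no reassociation
      return Acc;
    }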
Index: llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -1,14 +1,20 @@
-; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -enable-strict-reductions -S | FileCheck %s -check-prefix=CHECK
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-vector-width=8 -force-vector-interleave=1 -enable-strict-reductions -S | FileCheck %s -check-prefix=CHECK-VF8UF1
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-vector-width=8 -force-vector-interleave=4 -enable-strict-reductions -S | FileCheck %s -check-prefix=CHECK-VF8UF4
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -force-vector-width=4 -force-vector-interleave=1 -enable-strict-reductions -S | FileCheck %s -check-prefix=CHECK-VF4UF1
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -mattr=+sve -enable-strict-reductions -force-vector-width=1 -force-vector-interleave=1 -S | FileCheck %s -check-prefix=CHECK-VF1UF1
 
 define float @fadd_strict(float* noalias nocapture readonly %a, i64 %n) {
-; CHECK-LABEL: @fadd_strict
-; CHECK: vector.body:
-; CHECK: %[[VEC_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
-; CHECK: %[[LOAD:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.nxv8f32(float %[[VEC_PHI]], <vscale x 8 x float> %[[LOAD]])
-; CHECK: for.end
-; CHECK: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[PHI]]
+; CHECK-VF8UF1-LABEL: @fadd_strict
+; CHECK-VF8UF1: vector.body:
+; CHECK-VF8UF1: %[[VEC_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
+; CHECK-VF8UF1: %[[LOAD:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
+; CHECK-VF8UF1: %[[RDX]] = call float @llvm.vector.reduce.fadd.nxv8f32(float %[[VEC_PHI]], <vscale x 8 x float> %[[LOAD]])
+; CHECK-VF8UF1: for.end
+; CHECK-VF8UF1: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-VF8UF1: ret float %[[PHI]]
+
+; CHECK-VF1UF1: vector.body
+; CHECK-VF1UF1: call float @llvm.vector.reduce.fadd.nxv1f32(float {{.*}}, <vscale x 1 x float> {{.*}})
 entry:
   br label %for.body
@@ -27,21 +33,21 @@
 }
 
 define float @fadd_strict_unroll(float* noalias nocapture readonly %a, i64 %n) {
-; CHECK-LABEL: @fadd_strict_unroll
-; CHECK: vector.body:
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4:.*]], %vector.body ]
-; CHECK-NOT: phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4]], %vector.body ]
-; CHECK: %[[LOAD1:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
-; CHECK: %[[LOAD2:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
-; CHECK: %[[LOAD3:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
-; CHECK: %[[LOAD4:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
-; CHECK: %[[RDX1:.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float %[[VEC_PHI1]], <vscale x 8 x float> %[[LOAD1]])
-; CHECK: %[[RDX2:.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float %[[RDX1]], <vscale x 8 x float> %[[LOAD2]])
-; CHECK: %[[RDX3:.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float %[[RDX2]], <vscale x 8 x float> %[[LOAD3]])
-; CHECK: %[[RDX4]] = call float @llvm.vector.reduce.fadd.nxv8f32(float %[[RDX3]], <vscale x 8 x float> %[[LOAD4]])
-; CHECK: for.end
-; CHECK: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX4]], %middle.block ]
-; CHECK: ret float %[[PHI]]
+; CHECK-VF8UF4-LABEL: @fadd_strict_unroll
+; CHECK-VF8UF4: vector.body:
+; CHECK-VF8UF4: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4:.*]], %vector.body ]
+; CHECK-VF8UF4-NOT: phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4]], %vector.body ]
+; CHECK-VF8UF4: %[[LOAD1:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
+; CHECK-VF8UF4: %[[LOAD2:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
+; CHECK-VF8UF4: %[[LOAD3:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
+; CHECK-VF8UF4: %[[LOAD4:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>*
+; CHECK-VF8UF4: %[[RDX1:.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float %[[VEC_PHI1]], <vscale x 8 x float> %[[LOAD1]])
+; CHECK-VF8UF4: %[[RDX2:.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float %[[RDX1]], <vscale x 8 x float> %[[LOAD2]])
+; CHECK-VF8UF4: %[[RDX3:.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float %[[RDX2]], <vscale x 8 x float> %[[LOAD3]])
+; CHECK-VF8UF4: %[[RDX4]] = call float @llvm.vector.reduce.fadd.nxv8f32(float %[[RDX3]], <vscale x 8 x float> %[[LOAD4]])
+; CHECK-VF8UF4: for.end
+; CHECK-VF8UF4: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX4]], %middle.block ]
+; CHECK-VF8UF4: ret float %[[PHI]]
 entry:
   br label %for.body
@@ -53,36 +59,36 @@
   %add = fadd float %0, %sum.07
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
 
 for.end:
   ret float %add
 }
 
 define void @fadd_strict_interleave(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_strict_interleave
-; CHECK: entry
-; CHECK: %[[ARRAYIDX:.*]] = getelementptr inbounds float, float* %a, i64 1
-; CHECK: %[[LOAD1:.*]] = load float, float* %a
-; CHECK: %[[LOAD2:.*]] = load float, float* %[[ARRAYIDX]]
-; CHECK: vector.ph
-; CHECK: %[[STEPVEC1:.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
-; CHECK: %[[STEPVEC_ADD1:.*]] = add <vscale x 4 x i64> %[[STEPVEC1]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 0, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK: %[[STEPVEC_MUL:.*]] = mul <vscale x 4 x i64> %[[STEPVEC_ADD1]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 2, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK: %[[INDUCTION:.*]] = add <vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 0, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer), %[[STEPVEC_MUL]]
-; CHECK: vector.body
-; CHECK: %[[VEC_PHI2:.*]] = phi float [ %[[LOAD2]], %vector.ph ], [ %[[RDX2:.*]], %vector.body ]
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ %[[LOAD1]], %vector.ph ], [ %[[RDX1:.*]], %vector.body ]
-; CHECK: %[[VEC_IND:.*]] = phi <vscale x 4 x i64> [ %[[INDUCTION]], %vector.ph ], [ {{.*}}, %vector.body ]
-; CHECK: %[[GEP1:.*]] = getelementptr inbounds float, float* %b, <vscale x 4 x i64> %[[VEC_IND]]
-; CHECK: %[[MGATHER1:.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0f32(<vscale x 4 x float*> %[[GEP1]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float> undef)
-; CHECK: %[[RDX1]] = call float @llvm.vector.reduce.fadd.nxv4f32(float %[[VEC_PHI1]], <vscale x 4 x float> %[[MGATHER1]])
-; CHECK: %[[OR:.*]] = or <vscale x 4 x i64> %[[VEC_IND]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK: %[[GEP2:.*]] = getelementptr inbounds float, float* %b, <vscale x 4 x i64> %[[OR]]
-; CHECK: %[[MGATHER2:.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0f32(<vscale x 4 x float*> %[[GEP2]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float> undef)
-; CHECK: %[[RDX2]] = call float @llvm.vector.reduce.fadd.nxv4f32(float %[[VEC_PHI2]], <vscale x 4 x float> %[[MGATHER2]])
-; CHECK: for.end
-; CHECK ret void
+; CHECK-VF4UF1-LABEL: @fadd_strict_interleave
+; CHECK-VF4UF1: entry
+; CHECK-VF4UF1: %[[ARRAYIDX:.*]] = getelementptr inbounds float, float* %a, i64 1
+; CHECK-VF4UF1: %[[LOAD1:.*]] = load float, float* %a
+; CHECK-VF4UF1: %[[LOAD2:.*]] = load float, float* %[[ARRAYIDX]]
+; CHECK-VF4UF1: vector.ph
+; CHECK-VF4UF1: %[[STEPVEC1:.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-VF4UF1: %[[STEPVEC_ADD1:.*]] = add <vscale x 4 x i64> %[[STEPVEC1]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 0, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-VF4UF1: %[[STEPVEC_MUL:.*]] = mul <vscale x 4 x i64> %[[STEPVEC_ADD1]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 2, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-VF4UF1: %[[INDUCTION:.*]] = add <vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 0, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer), %[[STEPVEC_MUL]]
+; CHECK-VF4UF1: vector.body
+; CHECK-VF4UF1: %[[VEC_PHI2:.*]] = phi float [ %[[LOAD2]], %vector.ph ], [ %[[RDX2:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[VEC_PHI1:.*]] = phi float [ %[[LOAD1]], %vector.ph ], [ %[[RDX1:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[VEC_IND:.*]] = phi <vscale x 4 x i64> [ %[[INDUCTION]], %vector.ph ], [ {{.*}}, %vector.body ]
+; CHECK-VF4UF1: %[[GEP1:.*]] = getelementptr inbounds float, float* %b, <vscale x 4 x i64> %[[VEC_IND]]
+; CHECK-VF4UF1: %[[MGATHER1:.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0f32(<vscale x 4 x float*> %[[GEP1]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float> undef)
+; CHECK-VF4UF1: %[[RDX1]] = call float @llvm.vector.reduce.fadd.nxv4f32(float %[[VEC_PHI1]], <vscale x 4 x float> %[[MGATHER1]])
+; CHECK-VF4UF1: %[[OR:.*]] = or <vscale x 4 x i64> %[[VEC_IND]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i32 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-VF4UF1: %[[GEP2:.*]] = getelementptr inbounds float, float* %b, <vscale x 4 x i64> %[[OR]]
+; CHECK-VF4UF1: %[[MGATHER2:.*]] = call <vscale x 4 x float> @llvm.masked.gather.nxv4f32.nxv4p0f32(<vscale x 4 x float*> %[[GEP2]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float> undef)
+; CHECK-VF4UF1: %[[RDX2]] = call float @llvm.vector.reduce.fadd.nxv4f32(float %[[VEC_PHI2]], <vscale x 4 x float> %[[MGATHER2]])
+; CHECK-VF4UF1: for.end
+; CHECK-VF4UF1: ret void
 entry:
   %arrayidxa = getelementptr inbounds float, float* %a, i64 1
   %a1 = load float, float* %a, align 4
@@ -102,7 +108,7 @@
   %add2 = fadd float %1, %add.phi1
   %iv.next = add nuw nsw i64 %iv, 2
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !2
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
 
 for.end:
   store float %add1, float* %a, align 4
@@ -111,18 +117,18 @@
 }
 
 define float @fadd_invariant(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_invariant
-; CHECK: vector.body
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
-; CHECK: %[[LOAD1:.*]] = load <vscale x 4 x float>, <vscale x 4 x float>*
-; CHECK: %[[LOAD2:.*]] = load <vscale x 4 x float>, <vscale x 4 x float>*
-; CHECK: %[[ADD:.*]] = fadd <vscale x 4 x float> %[[LOAD1]], %[[LOAD2]]
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.nxv4f32(float %[[VEC_PHI1]], <vscale x 4 x float> %[[ADD]])
-; CHECK: for.end.loopexit
-; CHECK: %[[EXIT_PHI:.*]] = phi float [ {{.*}}, %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: for.end
-; CHECK: %[[PHI:.*]] = phi float [ 0.000000e+00, %entry ], [ %[[EXIT_PHI]], %for.end.loopexit ]
-; CHECK: ret float %[[PHI]]
+; CHECK-VF4UF1-LABEL: @fadd_invariant
+; CHECK-VF4UF1: vector.body
+; CHECK-VF4UF1: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[LOAD1:.*]] = load <vscale x 4 x float>, <vscale x 4 x float>*
+; CHECK-VF4UF1: %[[LOAD2:.*]] = load <vscale x 4 x float>, <vscale x 4 x float>*
+; CHECK-VF4UF1: %[[ADD:.*]] = fadd <vscale x 4 x float> %[[LOAD1]], %[[LOAD2]]
+; CHECK-VF4UF1: %[[RDX]] = call float @llvm.vector.reduce.fadd.nxv4f32(float %[[VEC_PHI1]], <vscale x 4 x float> %[[ADD]])
+; CHECK-VF4UF1: for.end.loopexit
+; CHECK-VF4UF1: %[[EXIT_PHI:.*]] = phi float [ {{.*}}, %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-VF4UF1: for.end
+; CHECK-VF4UF1: %[[PHI:.*]] = phi float [ 0.000000e+00, %entry ], [ %[[EXIT_PHI]], %for.end.loopexit ]
+; CHECK-VF4UF1: ret float %[[PHI]]
 entry:
   %arrayidx = getelementptr inbounds float, float* %a, i64 1
   %0 = load float, float* %arrayidx, align 4
@@ -140,7 +146,7 @@
   %rdx = fadd float %res.014, %add
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !2
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
 
 for.end:                                          ; preds = %for.body, %entry
   %res = phi float [ 0.000000e+00, %entry ], [ %rdx, %for.body ]
@@ -148,27 +154,27 @@
 }
 
 define float @fadd_conditional(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_conditional
-; CHECK: vector.body
-; CHECK: %[[VEC_PHI:.*]] = phi float [ 1.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
-; CHECK: %[[LOAD:.*]] = load <vscale x 4 x float>, <vscale x 4 x float>*
-; CHECK: %[[FCMP:.*]] = fcmp une <vscale x 4 x float> %[[LOAD]], shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 0.000000e+00, i32 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer)
-; CHECK: %[[MASKED_LOAD:.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* {{.*}}, i32 4, <vscale x 4 x i1> %[[FCMP]], <vscale x 4 x float> poison)
-; CHECK: %[[XOR:.*]] = xor <vscale x 4 x i1> %[[FCMP]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer)
-; CHECK: %[[SELECT:.*]] = select <vscale x 4 x i1> %[[XOR]], <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 3.000000e+00, i32 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float> %[[MASKED_LOAD]]
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.nxv4f32(float %[[VEC_PHI]], <vscale x 4 x float> %[[SELECT]])
-; CHECK: scalar.ph
-; CHECK: %[[MERGE_RDX:.*]] = phi float [ 1.000000e+00, %entry ], [ %[[RDX]], %middle.block ]
-; CHECK: for.body
-; CHECK: %[[RES:.*]] = phi float [ %[[MERGE_RDX]], %scalar.ph ], [ %[[FADD:.*]], %for.inc ]
-; CHECK: if.then
-; CHECK: %[[LOAD2:.*]] = load float, float*
-; CHECK: for.inc
-; CHECK: %[[PHI:.*]] = phi float [ %[[LOAD2]], %if.then ], [ 3.000000e+00, %for.body ]
-; CHECK: %[[FADD]] = fadd float %[[RES]], %[[PHI]]
-; CHECK: for.end
-; CHECK: %[[RDX_PHI:.*]] = phi float [ %[[FADD]], %for.inc ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[RDX_PHI]]
+; CHECK-VF4UF1-LABEL: @fadd_conditional
+; CHECK-VF4UF1: vector.body
+; CHECK-VF4UF1: %[[VEC_PHI:.*]] = phi float [ 1.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[LOAD:.*]] = load <vscale x 4 x float>, <vscale x 4 x float>*
+; CHECK-VF4UF1: %[[FCMP:.*]] = fcmp une <vscale x 4 x float> %[[LOAD]], shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 0.000000e+00, i32 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-VF4UF1: %[[MASKED_LOAD:.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* {{.*}}, i32 4, <vscale x 4 x i1> %[[FCMP]], <vscale x 4 x float> poison)
+; CHECK-VF4UF1: %[[XOR:.*]] = xor <vscale x 4 x i1> %[[FCMP]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer)
+; CHECK-VF4UF1: %[[SELECT:.*]] = select <vscale x 4 x i1> %[[XOR]], <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 3.000000e+00, i32 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x float> %[[MASKED_LOAD]]
+; CHECK-VF4UF1: %[[RDX]] = call float @llvm.vector.reduce.fadd.nxv4f32(float %[[VEC_PHI]], <vscale x 4 x float> %[[SELECT]])
+; CHECK-VF4UF1: scalar.ph
+; CHECK-VF4UF1: %[[MERGE_RDX:.*]] = phi float [ 1.000000e+00, %entry ], [ %[[RDX]], %middle.block ]
+; CHECK-VF4UF1: for.body
+; CHECK-VF4UF1: %[[RES:.*]] = phi float [ %[[MERGE_RDX]], %scalar.ph ], [ %[[FADD:.*]], %for.inc ]
+; CHECK-VF4UF1: if.then
+; CHECK-VF4UF1: %[[LOAD2:.*]] = load float, float*
+; CHECK-VF4UF1: for.inc
+; CHECK-VF4UF1: %[[PHI:.*]] = phi float [ %[[LOAD2]], %if.then ], [ 3.000000e+00, %for.body ]
+; CHECK-VF4UF1: %[[FADD]] = fadd float %[[RES]], %[[PHI]]
+; CHECK-VF4UF1: for.end
+; CHECK-VF4UF1: %[[RDX_PHI:.*]] = phi float [ %[[FADD]], %for.inc ], [ %[[RDX]], %middle.block ]
+; CHECK-VF4UF1: ret float %[[RDX_PHI]]
 entry:
   br label %for.body
@@ -190,7 +196,7 @@
   %fadd = fadd float %res, %phi
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !2
+  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
 
 for.end:
   %rdx = phi float [ %fadd, %for.inc ]
@@ -201,24 +207,24 @@
 
 ; Note: This test vectorizes the loop with a non-strict implementation, which reorders the FAdd operations.
 ; This is happening because we are using hints, where allowReordering returns true.
 define float @fadd_multiple(float* noalias nocapture %a, float* noalias nocapture %b, i64 %n) {
-; CHECK-LABEL: @fadd_multiple
-; CHECK: vector.body
-; CHECK: %[[PHI:.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> shufflevector (<vscale x 8 x float> insertelement (<vscale x 8 x float> undef, float -0.000000e+00, i32 0), <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer), float -0.000000e+00, i32 0), %vector.ph ], [ %[[VEC_FADD2:.*]], %vector.body ]
-; CHECK: %[[VEC_LOAD1:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>
-; CHECK: %[[VEC_FADD1:.*]] = fadd <vscale x 8 x float> %[[PHI]], %[[VEC_LOAD1]]
-; CHECK: %[[VEC_LOAD2:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>
-; CHECK: %[[VEC_FADD2]] = fadd <vscale x 8 x float> %[[VEC_FADD1]], %[[VEC_LOAD2]]
-; CHECK: middle.block
-; CHECK: %[[RDX:.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> %[[VEC_FADD2]])
-; CHECK: for.body
-; CHECK: %[[SUM:.*]] = phi float [ %bc.merge.rdx, %scalar.ph ], [ %[[FADD2:.*]], %for.body ]
-; CHECK: %[[LOAD1:.*]] = load float, float*
-; CHECK: %[[FADD1:.*]] = fadd float %[[SUM]], %[[LOAD1]]
-; CHECK: %[[LOAD2:.*]] = load float, float*
-; CHECK: %[[FADD2]] = fadd float %[[FADD1]], %[[LOAD2]]
-; CHECK: for.end
-; CHECK: %[[RET:.*]] = phi float [ %[[FADD2]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[RET]]
+; CHECK-VF8UF1-LABEL: @fadd_multiple
+; CHECK-VF8UF1: vector.body
+; CHECK-VF8UF1: %[[PHI:.*]] = phi <vscale x 8 x float> [ insertelement (<vscale x 8 x float> shufflevector (<vscale x 8 x float> insertelement (<vscale x 8 x float> undef, float -0.000000e+00, i32 0), <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer), float -0.000000e+00, i32 0), %vector.ph ], [ %[[VEC_FADD2:.*]], %vector.body ]
+; CHECK-VF8UF1: %[[VEC_LOAD1:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>
+; CHECK-VF8UF1: %[[VEC_FADD1:.*]] = fadd <vscale x 8 x float> %[[PHI]], %[[VEC_LOAD1]]
+; CHECK-VF8UF1: %[[VEC_LOAD2:.*]] = load <vscale x 8 x float>, <vscale x 8 x float>
+; CHECK-VF8UF1: %[[VEC_FADD2]] = fadd <vscale x 8 x float> %[[VEC_FADD1]], %[[VEC_LOAD2]]
+; CHECK-VF8UF1: middle.block
+; CHECK-VF8UF1: %[[RDX:.*]] = call float @llvm.vector.reduce.fadd.nxv8f32(float -0.000000e+00, <vscale x 8 x float> %[[VEC_FADD2]])
+; CHECK-VF8UF1: for.body
+; CHECK-VF8UF1: %[[SUM:.*]] = phi float [ %bc.merge.rdx, %scalar.ph ], [ %[[FADD2:.*]], %for.body ]
+; CHECK-VF8UF1: %[[LOAD1:.*]] = load float, float*
+; CHECK-VF8UF1: %[[FADD1:.*]] = fadd float %[[SUM]], %[[LOAD1]]
+; CHECK-VF8UF1: %[[LOAD2:.*]] = load float, float*
+; CHECK-VF8UF1: %[[FADD2]] = fadd float %[[FADD1]], %[[LOAD2]]
+; CHECK-VF8UF1: for.end
+; CHECK-VF8UF1: %[[RET:.*]] = phi float [ %[[FADD2]], %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-VF8UF1: ret float %[[RET]]
 entry:
   br label %for.body
@@ -240,12 +246,5 @@
   ret float %rdx
 }
 
-!0 = distinct !{!0, !3, !6, !8}
-!1 = distinct !{!1, !3, !7, !8}
-!2 = distinct !{!2, !4, !6, !8}
-!3 = !{!"llvm.loop.vectorize.width", i32 8}
-!4 = !{!"llvm.loop.vectorize.width", i32 4}
-!5 = !{!"llvm.loop.vectorize.width", i32 2}
-!6 = !{!"llvm.loop.interleave.count", i32 1}
-!7 = !{!"llvm.loop.interleave.count", i32 4}
-!8 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
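One detail worth calling out in the tests above (and in fadd_strict_unroll in the file below): when an in-order reduction is interleaved, the parts are chained rather than reduced as a tree. Each llvm.vector.reduce.fadd takes the previous part's scalar result as its start value, so program order is still preserved. A sketch reusing the strict_reduce_fadd model from the earlier note (illustrative only):

    // In-order reduction interleaved by 4: the parts form a chain, not a tree.
    float chained_reduce_fadd(float Phi, const float *Parts[4], int VL) {
      float Acc = Phi; // loop-carried scalar accumulator
      for (int Part = 0; Part < 4; ++Part)
        Acc = strict_reduce_fadd(Acc, Parts[Part], VL);
      return Acc;
    }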
Index: llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
+++ llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
@@ -1,14 +1,23 @@
-; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -S | FileCheck %s -check-prefix=CHECK
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -force-vector-width=8 -force-vector-interleave=1 -S | FileCheck %s -check-prefix=CHECK-VF8UF1
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -force-vector-width=8 -force-vector-interleave=4 -S | FileCheck %s -check-prefix=CHECK-VF8UF4
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s -check-prefix=CHECK-VF4UF1
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -force-vector-width=2 -force-vector-interleave=1 -S | FileCheck %s -check-prefix=CHECK-PRED
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -force-vector-width=1 -force-vector-interleave=1 -S | FileCheck %s -check-prefix=CHECK-SCALAR
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize -S 2>%t | FileCheck %s -check-prefix=CHECK-NO-HINTS
+; RUN: cat %t | FileCheck %s -check-prefix=CHECK-REMARKS
 
+; CHECK-REMARKS: vectorized loop (vectorization width: 2, interleaved count: 2)
 define float @fadd_strict(float* noalias nocapture readonly %a, i64 %n) {
-; CHECK-LABEL: @fadd_strict
-; CHECK: vector.body:
-; CHECK: %[[VEC_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
-; CHECK: %[[LOAD:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI]], <8 x float> %[[LOAD]])
-; CHECK: for.end
-; CHECK: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[PHI]]
+; CHECK-VF8UF1-LABEL: @fadd_strict
+; CHECK-VF8UF1: vector.body:
+; CHECK-VF8UF1: %[[VEC_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
+; CHECK-VF8UF1: %[[LOAD:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF1: %[[RDX]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI]], <8 x float> %[[LOAD]])
+; CHECK-VF8UF1: for.end
+; CHECK-VF8UF1: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-VF8UF1: ret float %[[PHI]]
+
+; CHECK-SCALAR-NOT: vector.body
 entry:
   br label %for.body
@@ -20,28 +29,29 @@
   %add = fadd float %0, %sum.07
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+  br i1 %exitcond.not, label %for.end, label %for.body
 
 for.end:
   ret float %add
 }
 
+; CHECK-REMARKS: vectorized loop (vectorization width: 2, interleaved count: 2)
 define float @fadd_strict_unroll(float* noalias nocapture readonly %a, i64 %n) {
-; CHECK-LABEL: @fadd_strict_unroll
-; CHECK: vector.body:
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4:.*]], %vector.body ]
-; CHECK-NOT: phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4]], %vector.body ]
-; CHECK: %[[LOAD1:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD2:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD3:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD4:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[RDX1:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI1]], <8 x float> %[[LOAD1]])
-; CHECK: %[[RDX2:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX1]], <8 x float> %[[LOAD2]])
-; CHECK: %[[RDX3:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX2]], <8 x float> %[[LOAD3]])
-; CHECK: %[[RDX4]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX3]], <8 x float> %[[LOAD4]])
-; CHECK: for.end
-; CHECK: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX4]], %middle.block ]
-; CHECK: ret float %[[PHI]]
+; CHECK-VF8UF4-LABEL: @fadd_strict_unroll
+; CHECK-VF8UF4: vector.body:
+; CHECK-VF8UF4: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4:.*]], %vector.body ]
+; CHECK-VF8UF4-NOT: phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4]], %vector.body ]
+; CHECK-VF8UF4: %[[LOAD1:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD2:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD3:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD4:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[RDX1:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI1]], <8 x float> %[[LOAD1]])
+; CHECK-VF8UF4: %[[RDX2:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX1]], <8 x float> %[[LOAD2]])
+; CHECK-VF8UF4: %[[RDX3:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX2]], <8 x float> %[[LOAD3]])
+; CHECK-VF8UF4: %[[RDX4]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX3]], <8 x float> %[[LOAD4]])
+; CHECK-VF8UF4: for.end
+; CHECK-VF8UF4: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX4]], %middle.block ]
+; CHECK-VF8UF4: ret float %[[PHI]]
 entry:
   br label %for.body
@@ -53,7 +63,7 @@
   %add = fadd float %0, %sum.07
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
+  br i1 %exitcond.not, label %for.end, label %for.body
 
 for.end:
   ret float %add
@@ -67,30 +77,31 @@
 ; }
 ; return sum;
 
+; CHECK-REMARKS: vectorized loop (vectorization width: 2, interleaved count: 2)
 define float @fadd_strict_unroll_last_val(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_strict_unroll_last_val
-; CHECK: vector.body
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4:.*]], %vector.body ]
-; CHECK-NOT: phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4]], %vector.body ]
-; CHECK: %[[LOAD1:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD2:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD3:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD4:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[RDX1:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI1]], <8 x float> %[[LOAD1]])
-; CHECK: %[[RDX2:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX1]], <8 x float> %[[LOAD2]])
-; CHECK: %[[RDX3:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX2]], <8 x float> %[[LOAD3]])
-; CHECK: %[[RDX4]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX3]], <8 x float> %[[LOAD4]])
-; CHECK: for.body
-; CHECK: %[[SUM_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ {{.*}}, %scalar.ph ]
-; CHECK: %[[LOAD5:.*]] = load float, float*
-; CHECK: %[[FADD]] = fadd float %[[SUM_PHI]], %[[LOAD5]]
-; CHECK: for.cond.cleanup
-; CHECK: %[[FADD_LCSSA:.*]] = phi float [ %[[FADD]], %for.body ], [ %[[RDX4]], %middle.block ]
-; CHECK: %[[FADD_42:.*]] = fadd float %[[FADD_LCSSA]], 4.200000e+01
-; CHECK: store float %[[FADD_42]], float* %b
-; CHECK: for.end
-; CHECK: %[[SUM_LCSSA:.*]] = phi float [ %[[FADD_LCSSA]], %for.cond.cleanup ], [ 0.000000e+00, %entry ]
-; CHECK: ret float %[[SUM_LCSSA]]
+; CHECK-VF8UF4-LABEL: @fadd_strict_unroll_last_val
+; CHECK-VF8UF4: vector.body
+; CHECK-VF8UF4: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4:.*]], %vector.body ]
+; CHECK-VF8UF4-NOT: phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4]], %vector.body ]
+; CHECK-VF8UF4: %[[LOAD1:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD2:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD3:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD4:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[RDX1:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI1]], <8 x float> %[[LOAD1]])
+; CHECK-VF8UF4: %[[RDX2:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX1]], <8 x float> %[[LOAD2]])
+; CHECK-VF8UF4: %[[RDX3:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX2]], <8 x float> %[[LOAD3]])
+; CHECK-VF8UF4: %[[RDX4]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX3]], <8 x float> %[[LOAD4]])
+; CHECK-VF8UF4: for.body
+; CHECK-VF8UF4: %[[SUM_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ {{.*}}, %scalar.ph ]
+; CHECK-VF8UF4: %[[LOAD5:.*]] = load float, float*
+; CHECK-VF8UF4: %[[FADD]] = fadd float %[[SUM_PHI]], %[[LOAD5]]
+; CHECK-VF8UF4: for.cond.cleanup
+; CHECK-VF8UF4: %[[FADD_LCSSA:.*]] = phi float [ %[[FADD]], %for.body ], [ %[[RDX4]], %middle.block ]
+; CHECK-VF8UF4: %[[FADD_42:.*]] = fadd float %[[FADD_LCSSA]], 4.200000e+01
+; CHECK-VF8UF4: store float %[[FADD_42]], float* %b
+; CHECK-VF8UF4: for.end
+; CHECK-VF8UF4: %[[SUM_LCSSA:.*]] = phi float [ %[[FADD_LCSSA]], %for.cond.cleanup ], [ 0.000000e+00, %entry ]
+; CHECK-VF8UF4: ret float %[[SUM_LCSSA]]
 entry:
   %cmp = icmp sgt i64 %n, 0
   br i1 %cmp, label %for.body, label %for.end
@@ -103,7 +114,7 @@
   %fadd = fadd float %sum, %0
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !1
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
 
 for.cond.cleanup:
   %fadd.lcssa = phi float [ %fadd, %for.body ]
@@ -116,22 +127,23 @@
   ret float %sum.lcssa
 }
 
+; CHECK-REMARKS: vectorized loop (vectorization width: 2, interleaved count: 2)
 define void @fadd_strict_interleave(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_strict_interleave
-; CHECK: entry
-; CHECK: %[[ARRAYIDX:.*]] = getelementptr inbounds float, float* %a, i64 1
-; CHECK: %[[LOAD1:.*]] = load float, float* %a
-; CHECK: %[[LOAD2:.*]] = load float, float* %[[ARRAYIDX]]
-; CHECK: vector.body
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ %[[LOAD2]], %vector.ph ], [ %[[RDX2:.*]], %vector.body ]
-; CHECK: %[[VEC_PHI2:.*]] = phi float [ %[[LOAD1]], %vector.ph ], [ %[[RDX1:.*]], %vector.body ]
-; CHECK: %[[WIDE_LOAD:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[STRIDED1:.*]] = shufflevector <8 x float> %[[WIDE_LOAD]], <8 x float> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-; CHECK: %[[STRIDED2:.*]] = shufflevector <8 x float> %[[WIDE_LOAD]], <8 x float> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-; CHECK: %[[RDX1]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI2]], <4 x float> %[[STRIDED1]])
-; CHECK: %[[RDX2]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI1]], <4 x float> %[[STRIDED2]])
-; CHECK: for.end
-; CHECK ret void
+; CHECK-VF4UF1-LABEL: @fadd_strict_interleave
+; CHECK-VF4UF1: entry
+; CHECK-VF4UF1: %[[ARRAYIDX:.*]] = getelementptr inbounds float, float* %a, i64 1
+; CHECK-VF4UF1: %[[LOAD1:.*]] = load float, float* %a
+; CHECK-VF4UF1: %[[LOAD2:.*]] = load float, float* %[[ARRAYIDX]]
+; CHECK-VF4UF1: vector.body
+; CHECK-VF4UF1: %[[VEC_PHI1:.*]] = phi float [ %[[LOAD2]], %vector.ph ], [ %[[RDX2:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[VEC_PHI2:.*]] = phi float [ %[[LOAD1]], %vector.ph ], [ %[[RDX1:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[WIDE_LOAD:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF4UF1: %[[STRIDED1:.*]] = shufflevector <8 x float> %[[WIDE_LOAD]], <8 x float> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-VF4UF1: %[[STRIDED2:.*]] = shufflevector <8 x float> %[[WIDE_LOAD]], <8 x float> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-VF4UF1: %[[RDX1]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI2]], <4 x float> %[[STRIDED1]])
+; CHECK-VF4UF1: %[[RDX2]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI1]], <4 x float> %[[STRIDED2]])
+; CHECK-VF4UF1: for.end
+; CHECK-VF4UF1: ret void
 entry:
   %arrayidxa = getelementptr inbounds float, float* %a, i64 1
   %a1 = load float, float* %a, align 4
@@ -151,7 +163,7 @@
   %add2 = fadd float %1, %add.phi1
   %iv.next = add nuw nsw i64 %iv, 2
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !2
+  br i1 %exitcond.not, label %for.end, label %for.body
 
 for.end:
   store float %add1, float* %a, align 4
@@ -159,19 +171,20 @@
   ret void
 }
 
+; CHECK-REMARKS: vectorized loop (vectorization width: 4, interleaved count: 2)
 define float @fadd_invariant(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_invariant
-; CHECK: vector.body
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
-; CHECK: %[[LOAD1:.*]] = load <4 x float>, <4 x float>*
-; CHECK: %[[LOAD2:.*]] = load <4 x float>, <4 x float>*
-; CHECK: %[[ADD:.*]] = fadd <4 x float> %[[LOAD1]], %[[LOAD2]]
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI1]], <4 x float> %[[ADD]])
-; CHECK: for.end.loopexit
-; CHECK: %[[EXIT_PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: for.end
-; CHECK: %[[PHI:.*]] = phi float [ 0.000000e+00, %entry ], [ %[[EXIT_PHI]], %for.end.loopexit ]
-; CHECK: ret float %[[PHI]]
+; CHECK-VF4UF1-LABEL: @fadd_invariant
+; CHECK-VF4UF1: vector.body
+; CHECK-VF4UF1: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[LOAD1:.*]] = load <4 x float>, <4 x float>*
+; CHECK-VF4UF1: %[[LOAD2:.*]] = load <4 x float>, <4 x float>*
+; CHECK-VF4UF1: %[[ADD:.*]] = fadd <4 x float> %[[LOAD1]], %[[LOAD2]]
+; CHECK-VF4UF1: %[[RDX]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI1]], <4 x float> %[[ADD]])
+; CHECK-VF4UF1: for.end.loopexit
+; CHECK-VF4UF1: %[[EXIT_PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-VF4UF1: for.end
+; CHECK-VF4UF1: %[[PHI:.*]] = phi float [ 0.000000e+00, %entry ], [ %[[EXIT_PHI]], %for.end.loopexit ]
+; CHECK-VF4UF1: ret float %[[PHI]]
 entry:
   %arrayidx = getelementptr inbounds float, float* %a, i64 1
   %0 = load float, float* %arrayidx, align 4
@@ -189,40 +202,41 @@
   %rdx = fadd float %res.014, %add
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !2
+  br i1 %exitcond.not, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body, %entry
   %res = phi float [ 0.000000e+00, %entry ], [ %rdx, %for.body ]
   ret float %res
 }
 
+; CHECK-REMARKS: the cost-model indicates that vectorization is not beneficial
 define float @fadd_conditional(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_conditional
-; CHECK: vector.body:
-; CHECK: %[[PHI:.*]] = phi float [ 1.000000e+00, %vector.ph ], [ %[[RDX:.*]], %pred.load.continue6 ]
-; CHECK: %[[LOAD1:.*]] = load <4 x float>, <4 x float>*
-; CHECK: %[[FCMP1:.*]] = fcmp une <4 x float> %[[LOAD1]], zeroinitializer
-; CHECK: %[[EXTRACT:.*]] = extractelement <4 x i1> %[[FCMP1]], i32 0
-; CHECK: br i1 %[[EXTRACT]], label %pred.load.if, label %pred.load.continue
-; CHECK: pred.load.continue6
-; CHECK: %[[PHI1:.*]] = phi <4 x float> [ %[[PHI0:.*]], %pred.load.continue4 ], [ %[[INS_ELT:.*]], %pred.load.if5 ]
-; CHECK: %[[XOR:.*]] = xor <4 x i1> %[[FCMP1]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK: %[[PRED:.*]] = select <4 x i1> %[[XOR]], <4 x float> <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>, <4 x float> %[[PHI1]]
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[PHI]], <4 x float> %[[PRED]])
-; CHECK: for.body
-; CHECK: %[[RES_PHI:.*]] = phi float [ %[[MERGE_RDX:.*]], %scalar.ph ], [ %[[FADD:.*]], %for.inc ]
-; CHECK: %[[LOAD2:.*]] = load float, float*
-; CHECK: %[[FCMP2:.*]] = fcmp une float %[[LOAD2]], 0.000000e+00
-; CHECK: br i1 %[[FCMP2]], label %if.then, label %for.inc
-; CHECK: if.then
-; CHECK: %[[LOAD3:.*]] = load float, float*
-; CHECK: br label %for.inc
-; CHECK: for.inc
-; CHECK: %[[PHI2:.*]] = phi float [ %[[LOAD3]], %if.then ], [ 3.000000e+00, %for.body ]
-; CHECK: %[[FADD]] = fadd float %[[RES_PHI]], %[[PHI2]]
-; CHECK: for.end
-; CHECK: %[[RDX_PHI:.*]] = phi float [ %[[FADD]], %for.inc ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[RDX_PHI]]
+; CHECK-VF4UF1-LABEL: @fadd_conditional
+; CHECK-VF4UF1: vector.body:
+; CHECK-VF4UF1: %[[PHI:.*]] = phi float [ 1.000000e+00, %vector.ph ], [ %[[RDX:.*]], %pred.load.continue6 ]
+; CHECK-VF4UF1: %[[LOAD1:.*]] = load <4 x float>, <4 x float>*
+; CHECK-VF4UF1: %[[FCMP1:.*]] = fcmp une <4 x float> %[[LOAD1]], zeroinitializer
+; CHECK-VF4UF1: %[[EXTRACT:.*]] = extractelement <4 x i1> %[[FCMP1]], i32 0
+; CHECK-VF4UF1: br i1 %[[EXTRACT]], label %pred.load.if, label %pred.load.continue
+; CHECK-VF4UF1: pred.load.continue6
+; CHECK-VF4UF1: %[[PHI1:.*]] = phi <4 x float> [ %[[PHI0:.*]], %pred.load.continue4 ], [ %[[INS_ELT:.*]], %pred.load.if5 ]
+; CHECK-VF4UF1: %[[XOR:.*]] = xor <4 x i1> %[[FCMP1]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-VF4UF1: %[[PRED:.*]] = select <4 x i1> %[[XOR]], <4 x float> <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>, <4 x float> %[[PHI1]]
+; CHECK-VF4UF1: %[[RDX]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[PHI]], <4 x float> %[[PRED]])
+; CHECK-VF4UF1: for.body
+; CHECK-VF4UF1: %[[RES_PHI:.*]] = phi float [ %[[MERGE_RDX:.*]], %scalar.ph ], [ %[[FADD:.*]], %for.inc ]
+; CHECK-VF4UF1: %[[LOAD2:.*]] = load float, float*
+; CHECK-VF4UF1: %[[FCMP2:.*]] = fcmp une float %[[LOAD2]], 0.000000e+00
+; CHECK-VF4UF1: br i1 %[[FCMP2]], label %if.then, label %for.inc
+; CHECK-VF4UF1: if.then
+; CHECK-VF4UF1: %[[LOAD3:.*]] = load float, float*
+; CHECK-VF4UF1: br label %for.inc
+; CHECK-VF4UF1: for.inc
+; CHECK-VF4UF1: %[[PHI2:.*]] = phi float [ %[[LOAD3]], %if.then ], [ 3.000000e+00, %for.body ]
+; CHECK-VF4UF1: %[[FADD]] = fadd float %[[RES_PHI]], %[[PHI2]]
+; CHECK-VF4UF1: for.end
+; CHECK-VF4UF1: %[[RDX_PHI:.*]] = phi float [ %[[FADD]], %for.inc ], [ %[[RDX]], %middle.block ]
+; CHECK-VF4UF1: ret float %[[RDX_PHI]]
 entry:
   br label %for.body
@@ -244,7 +258,7 @@
   %fadd = fadd float %res, %phi
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !2
+  br i1 %exitcond.not, label %for.end, label %for.body
 
 for.end:
   %rdx = phi float [ %fadd, %for.inc ]
@@ -252,21 +266,22 @@
 }
 
 ; Test to check masking correct, using the "llvm.loop.vectorize.predicate.enable" attribute
+; CHECK-REMARKS: interleaved loop (interleaved count: 2)
 define float @fadd_predicated(float* noalias nocapture %a, i64 %n) {
-; CHECK-LABEL: @fadd_predicated
-; CHECK: vector.ph
-; CHECK: %[[TRIP_MINUS_ONE:.*]] = sub i64 %n, 1
-; CHECK: %[[BROADCAST_INS:.*]] = insertelement <2 x i64> poison, i64 %[[TRIP_MINUS_ONE]], i32 0
-; CHECK: %[[SPLAT:.*]] = shufflevector <2 x i64> %[[BROADCAST_INS]], <2 x i64> poison, <2 x i32> zeroinitializer
-; CHECK: vector.body
-; CHECK: %[[RDX_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %pred.load.continue2 ]
-; CHECK: pred.load.continue2
-; CHECK: %[[PHI:.*]] = phi <2 x float> [ %[[PHI0:.*]], %pred.load.continue ], [ %[[INS_ELT:.*]], %pred.load.if1 ]
-; CHECK: %[[MASK:.*]] = select <2 x i1> %0, <2 x float> %[[PHI]], <2 x float> <float -0.000000e+00, float -0.000000e+00>
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.v2f32(float %[[RDX_PHI]], <2 x float> %[[MASK]])
-; CHECK: for.end:
-; CHECK: %[[RES_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[RES_PHI]]
+; CHECK-PRED-LABEL: @fadd_predicated
+; CHECK-PRED: vector.ph
+; CHECK-PRED: %[[TRIP_MINUS_ONE:.*]] = sub i64 %n, 1
+; CHECK-PRED: %[[BROADCAST_INS:.*]] = insertelement <2 x i64> poison, i64 %[[TRIP_MINUS_ONE]], i32 0
+; CHECK-PRED: %[[SPLAT:.*]] = shufflevector <2 x i64> %[[BROADCAST_INS]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-PRED: vector.body
+; CHECK-PRED: %[[RDX_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %pred.load.continue2 ]
+; CHECK-PRED: pred.load.continue2
+; CHECK-PRED: %[[PHI:.*]] = phi <2 x float> [ %[[PHI0:.*]], %pred.load.continue ], [ %[[INS_ELT:.*]], %pred.load.if1 ]
+; CHECK-PRED: %[[MASK:.*]] = select <2 x i1> %0, <2 x float> %[[PHI]], <2 x float> <float -0.000000e+00, float -0.000000e+00>
+; CHECK-PRED: %[[RDX]] = call float @llvm.vector.reduce.fadd.v2f32(float %[[RDX_PHI]], <2 x float> %[[MASK]])
+; CHECK-PRED: for.end:
+; CHECK-PRED: %[[RES_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-PRED: ret float %[[RES_PHI]]
 entry:
   br label %for.body
@@ -278,33 +293,71 @@
   %l7 = fadd float %sum.02, %l3
   %iv.next = add i64 %iv, 1
   %exitcond = icmp eq i64 %iv.next, %n
-  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3
+  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
 
 for.end:                                          ; preds = %for.body
   %sum.0.lcssa = phi float [ %l7, %for.body ]
   ret float %sum.0.lcssa
 }
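+
+; Note on @fadd_predicated above: before each in-order reduction step, the
+; lanes disabled by the mask are replaced with -0.0, the identity value for
+; fadd, so inactive lanes cannot perturb the result. A scalar model of the
+; masked reduction (illustrative only):
+;
+;   float acc = start;
+;   for (int i = 0; i < VL; ++i)
+;     acc = acc + (mask[i] ? lane[i] : -0.0f);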
+
+; Test that we can vectorize a loop containing both an FAdd reduction, which
+; we can vectorize in-loop, and an integer add reduction.
+; CHECK-REMARKS: vectorized loop (vectorization width: 2, interleaved count: 2)
+define float @fadd_mixed(float* noalias nocapture readonly %a, i64* noalias nocapture readonly %b, i64 %n) {
+; CHECK-VF4UF1-LABEL: @fadd_mixed
+; CHECK-VF4UF1: vector.body:
+; CHECK-VF4UF1: %[[VEC_PHI_F32:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[FADD_RDX:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[VEC_PHI_INT:.*]] = phi <4 x i64> [ zeroinitializer, %vector.ph ], [ %[[ADD:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[LOAD:.*]] = load <4 x float>, <4 x float>*
+; CHECK-VF4UF1: %[[FADD_RDX]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI_F32]], <4 x float> %[[LOAD]])
+; CHECK-VF4UF1: %[[ADD]] = add <4 x i64> %vec.ind, %[[VEC_PHI_INT]]
+; CHECK-VF4UF1: middle.block
+; CHECK-VF4UF1: %[[ADD_RDX:.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %[[ADD]])
+; CHECK-VF4UF1: for.end
+; CHECK-VF4UF1: %[[FADD_PHI:.*]] = phi float [ %[[SCALAR_FADD:.*]], %for.body ], [ %[[FADD_RDX]], %middle.block ]
+; CHECK-VF4UF1: %[[ADD_PHI:.*]] = phi i64 [ %[[SCALAR_ADD:.*]], %for.body ], [ %[[ADD_RDX]], %middle.block ]
+; CHECK-VF4UF1: store i64 %[[ADD_PHI]], i64* %b
+; CHECK-VF4UF1: ret float %[[FADD_PHI]]
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.fadd = phi float [ 0.000000e+00, %entry ], [ %fadd, %for.body ]
+  %sum.add = phi i64 [ 0, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %iv
+  %0 = load float, float* %arrayidx, align 4
+  %fadd = fadd float %0, %sum.fadd
+  %add = add i64 %iv, %sum.add
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %n
+  br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+  store i64 %add, i64* %b, align 4
+  ret float %fadd
+}
+
 ; Negative test - loop contains multiple fadds which we cannot safely reorder
+; CHECK-REMARKS: loop not vectorized: cannot prove it is safe to reorder floating-point operations
 define float @fadd_multiple(float* noalias nocapture %a, float* noalias nocapture %b, i64 %n) {
-; CHECK-LABEL: @fadd_multiple
-; CHECK: vector.body
-; CHECK: %[[PHI:.*]] = phi <8 x float> [ <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %vector.ph ], [ %[[VEC_FADD2:.*]], %vector.body ]
-; CHECK: %[[VEC_LOAD1:.*]] = load <8 x float>, <8 x float>
-; CHECK: %[[VEC_FADD1:.*]] = fadd <8 x float> %[[PHI]], %[[VEC_LOAD1]]
-; CHECK: %[[VEC_LOAD2:.*]] = load <8 x float>, <8 x float>
-; CHECK: %[[VEC_FADD2]] = fadd <8 x float> %[[VEC_FADD1]], %[[VEC_LOAD2]]
-; CHECK: middle.block
-; CHECK: %[[RDX:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> %[[VEC_FADD2]])
-; CHECK: for.body
-; CHECK: %[[SUM:.*]] = phi float [ %bc.merge.rdx, %scalar.ph ], [ %[[FADD2:.*]], %for.body ]
-; CHECK: %[[LOAD1:.*]] = load float, float*
-; CHECK: %[[FADD1:.*]] = fadd float %sum, %[[LOAD1]]
-; CHECK: %[[LOAD2:.*]] = load float, float*
-; CHECK: %[[FADD2]] = fadd float %[[FADD1]], %[[LOAD2]]
-; CHECK: for.end
-; CHECK: %[[RET:.*]] = phi float [ %[[FADD2]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[RET]]
+; CHECK-VF8UF1-LABEL: @fadd_multiple
+; CHECK-VF8UF1: vector.body
+; CHECK-VF8UF1: %[[PHI:.*]] = phi <8 x float> [ <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %vector.ph ], [ %[[VEC_FADD2:.*]], %vector.body ]
+; CHECK-VF8UF1: %[[VEC_LOAD1:.*]] = load <8 x float>, <8 x float>
+; CHECK-VF8UF1: %[[VEC_FADD1:.*]] = fadd <8 x float> %[[PHI]], %[[VEC_LOAD1]]
+; CHECK-VF8UF1: %[[VEC_LOAD2:.*]] = load <8 x float>, <8 x float>
+; CHECK-VF8UF1: %[[VEC_FADD2]] = fadd <8 x float> %[[VEC_FADD1]], %[[VEC_LOAD2]]
+; CHECK-VF8UF1: middle.block
+; CHECK-VF8UF1: %[[RDX:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> %[[VEC_FADD2]])
+; CHECK-VF8UF1: for.body
+; CHECK-VF8UF1: %[[SUM:.*]] = phi float [ %bc.merge.rdx, %scalar.ph ], [ %[[FADD2:.*]], %for.body ]
+; CHECK-VF8UF1: %[[LOAD1:.*]] = load float, float*
+; CHECK-VF8UF1: %[[FADD1:.*]] = fadd float %sum, %[[LOAD1]]
+; CHECK-VF8UF1: %[[LOAD2:.*]] = load float, float*
+; CHECK-VF8UF1: %[[FADD2]] = fadd float %[[FADD1]], %[[LOAD2]]
+; CHECK-VF8UF1: for.end
+; CHECK-VF8UF1: %[[RET:.*]] = phi float [ %[[FADD2]], %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-VF8UF1: ret float %[[RET]]
 entry:
   br label %for.body
@@ -319,21 +372,133 @@
   %add3 = fadd float %add, %1
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+  br i1 %exitcond.not, label %for.end, label %for.body
 
 for.end:                                          ; preds = %for.body
   %rdx = phi float [ %add3, %for.body ]
   ret float %rdx
 }
 
-!0 = distinct !{!0, !4, !7, !9}
-!1 = distinct !{!1, !4, !8, !9}
-!2 = distinct !{!2, !5, !7, !9}
-!3 = distinct !{!3, !6, !7, !9, !10}
-!4 = !{!"llvm.loop.vectorize.width", i32 8}
-!5 = !{!"llvm.loop.vectorize.width", i32 4}
-!6 = !{!"llvm.loop.vectorize.width", i32 2}
-!7 = !{!"llvm.loop.interleave.count", i32 1}
-!8 = !{!"llvm.loop.interleave.count", i32 4}
-!9 = !{!"llvm.loop.vectorize.enable", i1 true}
-!10 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
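+
+; Background for the remaining tests: an FP induction such as 'x += fp_inc'
+; can only be widened by materialising x + k*fp_inc for each lane k, which
+; reassociates the additions and changes the rounding unless the induction
+; is marked 'fast'. This is why canVectorizeFPMath rejects exact FP
+; inductions. Illustrative sketch:
+;
+;   for (int k = 0; k < VF; ++k)
+;     lane[k] = x + k * fp_inc;  /* not rounding-identical to ((x+fp_inc)+fp_inc)+... */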
+
+; Tests with both a floating point reduction & induction, e.g.
+;
+;float fp_iv_rdx_loop(float *values, float init, float * __restrict__ A, int N) {
+;  float fp_inc = 2.0;
+;  float x = init;
+;  float sum = 0.0;
+;  for (int i=0; i < N; ++i) {
+;    A[i] = x;
+;    x += fp_inc;
+;    sum += values[i];
+;  }
+;  return sum;
+;}
+
+; Test which includes a reduction which could be performed in-loop, but which also has an FP induction
+; variable which cannot be vectorized.
+; CHECK-REMARKS: loop not vectorized: cannot prove it is safe to reorder floating-point operations
+define float @induction_and_reduction(float* nocapture readonly %values, float %init, float* noalias nocapture %A, i64 %N) {
+; CHECK-NO-HINTS-LABEL: @induction_and_reduction
+; CHECK-NO-HINTS-NOT: vector.body
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.015 = phi float [ 0.000000e+00, %entry ], [ %add3, %for.body ]
+  %x.014 = phi float [ %init, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds float, float* %A, i64 %iv
+  store float %x.014, float* %arrayidx, align 4
+  %add = fadd float %x.014, 2.000000e+00
+  %arrayidx2 = getelementptr inbounds float, float* %values, i64 %iv
+  %0 = load float, float* %arrayidx2, align 4
+  %add3 = fadd float %sum.015, %0
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %N
+  br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+  %add3.lcssa = phi float [ %add3, %for.body ]
+  ret float %add3.lcssa
+}
+
+; As above, but the floating-point induction is 'fast'
+; CHECK-REMARKS: vectorized loop (vectorization width: 4, interleaved count: 2)
+define float @fast_induction_and_reduction(float* nocapture readonly %values, float %init, float* noalias nocapture %A, i64 %N) {
+; CHECK-NO-HINTS-LABEL: @fast_induction_and_reduction
+; CHECK-NO-HINTS: vector.ph
+; CHECK-NO-HINTS: %[[INDUCTION:.*]] = fadd fast <4 x float> {{.*}}, <float 0.000000e+00, float 2.000000e+00, float 4.000000e+00, float 6.000000e+00>
+; CHECK-NO-HINTS: vector.body
+; CHECK-NO-HINTS: %[[RDX_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[FADD2:.*]], %vector.body ]
+; CHECK-NO-HINTS: %[[IND_PHI:.*]] = phi <4 x float> [ %[[INDUCTION]], %vector.ph ], [ %[[VEC_IND_NEXT:.*]], %vector.body ]
+; CHECK-NO-HINTS: %[[STEP_ADD:.*]] = fadd fast <4 x float> %[[IND_PHI]], <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00>
+; CHECK-NO-HINTS: %[[LOAD1:.*]] = load <4 x float>, <4 x float>*
+; CHECK-NO-HINTS: %[[LOAD2:.*]] = load <4 x float>, <4 x float>*
+; CHECK-NO-HINTS: %[[FADD1:.*]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[RDX_PHI]], <4 x float> %[[LOAD1]])
+; CHECK-NO-HINTS: %[[FADD2]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[FADD1]], <4 x float> %[[LOAD2]])
+; CHECK-NO-HINTS: %[[VEC_IND_NEXT]] = fadd fast <4 x float> %[[STEP_ADD]], <float 8.000000e+00, float 8.000000e+00, float 8.000000e+00, float 8.000000e+00>
+; CHECK-NO-HINTS: for.body
+; CHECK-NO-HINTS: %[[RDX_SUM_PHI:.*]] = phi float [ {{.*}}, %scalar.ph ], [ %[[FADD3:.*]], %for.body ]
+; CHECK-NO-HINTS: %[[IND_SUM_PHI:.*]] = phi fast float [ {{.*}}, %scalar.ph ], [ %[[ADD_IND:.*]], %for.body ]
+; CHECK-NO-HINTS: store float %[[IND_SUM_PHI]], float*
+; CHECK-NO-HINTS: %[[ADD_IND]] = fadd fast float %[[IND_SUM_PHI]], 2.000000e+00
+; CHECK-NO-HINTS: %[[LOAD3:.*]] = load float, float*
+; CHECK-NO-HINTS: %[[FADD3]] = fadd float %[[RDX_SUM_PHI]], %[[LOAD3]]
+; CHECK-NO-HINTS: for.end
+; CHECK-NO-HINTS: %[[RES_PHI:.*]] = phi float [ %[[FADD3]], %for.body ], [ %[[FADD2]], %middle.block ]
+; CHECK-NO-HINTS: ret float %[[RES_PHI]]
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum.015 = phi float [ 0.000000e+00, %entry ], [ %add3, %for.body ]
+  %x.014 = phi fast float [ %init, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds float, float* %A, i64 %iv
+  store float %x.014, float* %arrayidx, align 4
+  %add = fadd fast float %x.014, 2.000000e+00
+  %arrayidx2 = getelementptr inbounds float, float* %values, i64 %iv
+  %0 = load float, float* %arrayidx2, align 4
+  %add3 = fadd float %sum.015, %0
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %N
+  br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+  %add3.lcssa = phi float [ %add3, %for.body ]
+  ret float %add3.lcssa
+}
+
+; The FP induction is fast, but here we can't vectorize as only one of the reductions is an FAdd that can be performed in-loop
+; CHECK-REMARKS: loop not vectorized: cannot prove it is safe to reorder floating-point operations
+define float @fast_induction_unordered_reduction(float* nocapture readonly %values, float %init, float* noalias nocapture %A, float* noalias nocapture %B, i64 %N) {
+; CHECK-NO-HINTS-LABEL: @fast_induction_unordered_reduction
+; CHECK-NO-HINTS-NOT: vector.body
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %sum2.023 = phi float [ 3.000000e+00, %entry ], [ %mul, %for.body ]
+  %sum.022 = phi float [ 0.000000e+00, %entry ], [ %add3, %for.body ]
+  %x.021 = phi float [ %init, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds float, float* %A, i64 %iv
+  store float %x.021, float* %arrayidx, align 4
+  %add = fadd fast float %x.021, 2.000000e+00
+  %arrayidx2 = getelementptr inbounds float, float* %values, i64 %iv
+  %0 = load float, float* %arrayidx2, align 4
+  %add3 = fadd float %sum.022, %0
+  %mul = fmul float %sum2.023, %0
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %N
+  br i1 %exitcond.not, label %for.end, label %for.body
+
+for.end:
+  %add3.lcssa = phi float [ %add3, %for.body ]
+  %mul.lcssa = phi float [ %mul, %for.body ]
+  %add6 = fadd float %add3.lcssa, %mul.lcssa
+  ret float %add6
+}
+
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}