Index: llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
===================================================================
--- llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -143,7 +143,7 @@
     // error accumulates in the loop.
     ElementCount EC = getWidth();
     return getForce() == LoopVectorizeHints::FK_Enabled ||
-           EC.getKnownMinValue() > 1;
+           (EC.isNonZero() && !EC.isScalar());
   }

   bool isPotentiallyUnsafe() const {
Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -9649,6 +9649,20 @@
   }
 }

+static bool canVectorizeOrderedFPMath(LoopVectorizationLegality &LVL,
+                                      LoopVectorizeHints &Hints) {
+  if (Hints.allowReordering())
+    return true;
+
+  if (!EnableStrictReductions || Hints.getWidth().isScalar())
+    return false;
+
+  return all_of(LVL.getReductionVars(), [&](auto &Reduction) -> bool {
+    const RecurrenceDescriptor &RdxDesc = Reduction.second;
+    return RdxDesc.isOrdered();
+  });
+}
+
 LoopVectorizePass::LoopVectorizePass(LoopVectorizeOptions Opts)
     : InterleaveOnlyWhenForced(Opts.InterleaveOnlyWhenForced ||
                                !EnableLoopInterleaving),
@@ -9766,7 +9780,8 @@
     return false;
   }

-  if (!Requirements.canVectorizeFPMath(Hints)) {
+  if (!Requirements.canVectorizeFPMath(Hints) &&
+      !canVectorizeOrderedFPMath(LVL, Hints)) {
     ORE->emit([&]() {
       auto *ExactFPMathInst = Requirements.getExactFPInst();
       return OptimizationRemarkAnalysisFPCommute(DEBUG_TYPE, "CantReorderFPOps",
Index: llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
+++ llvm/test/Transforms/LoopVectorize/AArch64/strict-fadd.ll
@@ -1,14 +1,20 @@
-; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -S | FileCheck %s -check-prefix=CHECK
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -force-vector-width=8 -force-vector-interleave=1 -S | FileCheck %s -check-prefix=CHECK-VF8UF1
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -force-vector-width=8 -force-vector-interleave=4 -S | FileCheck %s -check-prefix=CHECK-VF8UF4
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s -check-prefix=CHECK-VF4UF1
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -force-vector-width=2 -force-vector-interleave=1 -S | FileCheck %s -check-prefix=CHECK-PRED
+; RUN: opt < %s -loop-vectorize -mtriple aarch64-unknown-linux-gnu -enable-strict-reductions -pass-remarks=loop-vectorize -pass-remarks-analysis=loop-vectorize -S 2>%t
+; RUN: cat %t | FileCheck %s -check-prefix=CHECK-REMARKS

+; CHECK-REMARKS: vectorized loop (vectorization width: 2, interleaved count: 2)
 define float @fadd_strict(float* noalias nocapture readonly %a, i64 %n) {
-; CHECK-LABEL: @fadd_strict
-; CHECK: vector.body:
-; CHECK: %[[VEC_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
-; CHECK: %[[LOAD:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI]], <8 x float> %[[LOAD]])
-; CHECK: for.end
-; CHECK: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[PHI]]
+; CHECK-VF8UF1-LABEL: @fadd_strict
+; CHECK-VF8UF1: vector.body:
+; CHECK-VF8UF1: %[[VEC_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
+; CHECK-VF8UF1: %[[LOAD:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF1: %[[RDX]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI]], <8 x float> %[[LOAD]])
+; CHECK-VF8UF1: for.end
+; CHECK-VF8UF1: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-VF8UF1: ret float %[[PHI]]
 entry:
   br label %for.body

@@ -20,28 +26,29 @@
   %add = fadd float %0, %sum.07
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+  br i1 %exitcond.not, label %for.end, label %for.body

 for.end:
   ret float %add
 }

+; CHECK-REMARKS: vectorized loop (vectorization width: 2, interleaved count: 2)
 define float @fadd_strict_unroll(float* noalias nocapture readonly %a, i64 %n) {
-; CHECK-LABEL: @fadd_strict_unroll
-; CHECK: vector.body:
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4:.*]], %vector.body ]
-; CHECK-NOT: phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4]], %vector.body ]
-; CHECK: %[[LOAD1:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD2:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD3:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD4:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[RDX1:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI1]], <8 x float> %[[LOAD1]])
-; CHECK: %[[RDX2:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX1]], <8 x float> %[[LOAD2]])
-; CHECK: %[[RDX3:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX2]], <8 x float> %[[LOAD3]])
-; CHECK: %[[RDX4]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX3]], <8 x float> %[[LOAD4]])
-; CHECK: for.end
-; CHECK: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX4]], %middle.block ]
-; CHECK: ret float %[[PHI]]
+; CHECK-VF8UF4-LABEL: @fadd_strict_unroll
+; CHECK-VF8UF4: vector.body:
+; CHECK-VF8UF4: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4:.*]], %vector.body ]
+; CHECK-VF8UF4-NOT: phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4]], %vector.body ]
+; CHECK-VF8UF4: %[[LOAD1:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD2:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD3:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD4:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[RDX1:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI1]], <8 x float> %[[LOAD1]])
+; CHECK-VF8UF4: %[[RDX2:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX1]], <8 x float> %[[LOAD2]])
+; CHECK-VF8UF4: %[[RDX3:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX2]], <8 x float> %[[LOAD3]])
+; CHECK-VF8UF4: %[[RDX4]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX3]], <8 x float> %[[LOAD4]])
+; CHECK-VF8UF4: for.end
+; CHECK-VF8UF4: %[[PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX4]], %middle.block ]
+; CHECK-VF8UF4: ret float %[[PHI]]
 entry:
   br label %for.body

@@ -53,7 +60,7 @@
   %add = fadd float %0, %sum.07
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !1
+  br i1 %exitcond.not, label %for.end, label %for.body

 for.end:
   ret float %add
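; [Editorial aside, not part of the patch] The in-order checks above hinge on
; the semantics of llvm.vector.reduce.fadd: when the call carries no `reassoc`
; fast-math flag, LangRef guarantees a sequential, lane-by-lane reduction
; starting from the scalar operand, so threading each iteration's result back
; in as the next start value reproduces the scalar loop's exact rounding.
; A minimal standalone sketch (the function name is illustrative only):
;
;   define float @ordered_fadd_sketch(<8 x float> %v0, <8 x float> %v1) {
;     ; %r0 = ((((0.0 + v0[0]) + v0[1]) + ...) + v0[7]), strictly in lane order
;     %r0 = call float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> %v0)
;     ; chaining %r0 as the start value keeps the next eight adds in program order
;     %r1 = call float @llvm.vector.reduce.fadd.v8f32(float %r0, <8 x float> %v1)
;     ret float %r1
;   }
;   declare float @llvm.vector.reduce.fadd.v8f32(float, <8 x float>)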
@@ -67,30 +74,31 @@
 ; }
 ; return sum;

+; CHECK-REMARKS: vectorized loop (vectorization width: 2, interleaved count: 2)
 define float @fadd_strict_unroll_last_val(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_strict_unroll_last_val
-; CHECK: vector.body
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4:.*]], %vector.body ]
-; CHECK-NOT: phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4]], %vector.body ]
-; CHECK: %[[LOAD1:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD2:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD3:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[LOAD4:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[RDX1:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI1]], <8 x float> %[[LOAD1]])
-; CHECK: %[[RDX2:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX1]], <8 x float> %[[LOAD2]])
-; CHECK: %[[RDX3:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX2]], <8 x float> %[[LOAD3]])
-; CHECK: %[[RDX4]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX3]], <8 x float> %[[LOAD4]])
-; CHECK: for.body
-; CHECK: %[[SUM_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ {{.*}}, %scalar.ph ]
-; CHECK: %[[LOAD5:.*]] = load float, float*
-; CHECK: %[[FADD]] = fadd float %[[SUM_PHI]], %[[LOAD5]]
-; CHECK: for.cond.cleanup
-; CHECK: %[[FADD_LCSSA:.*]] = phi float [ %[[FADD]], %for.body ], [ %[[RDX4]], %middle.block ]
-; CHECK: %[[FADD_42:.*]] = fadd float %[[FADD_LCSSA]], 4.200000e+01
-; CHECK: store float %[[FADD_42]], float* %b
-; CHECK: for.end
-; CHECK: %[[SUM_LCSSA:.*]] = phi float [ %[[FADD_LCSSA]], %for.cond.cleanup ], [ 0.000000e+00, %entry ]
-; CHECK: ret float %[[SUM_LCSSA]]
+; CHECK-VF8UF4-LABEL: @fadd_strict_unroll_last_val
+; CHECK-VF8UF4: vector.body
+; CHECK-VF8UF4: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4:.*]], %vector.body ]
+; CHECK-VF8UF4-NOT: phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX4]], %vector.body ]
+; CHECK-VF8UF4: %[[LOAD1:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD2:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD3:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[LOAD4:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF8UF4: %[[RDX1:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[VEC_PHI1]], <8 x float> %[[LOAD1]])
+; CHECK-VF8UF4: %[[RDX2:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX1]], <8 x float> %[[LOAD2]])
+; CHECK-VF8UF4: %[[RDX3:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX2]], <8 x float> %[[LOAD3]])
+; CHECK-VF8UF4: %[[RDX4]] = call float @llvm.vector.reduce.fadd.v8f32(float %[[RDX3]], <8 x float> %[[LOAD4]])
+; CHECK-VF8UF4: for.body
+; CHECK-VF8UF4: %[[SUM_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ {{.*}}, %scalar.ph ]
+; CHECK-VF8UF4: %[[LOAD5:.*]] = load float, float*
+; CHECK-VF8UF4: %[[FADD]] = fadd float %[[SUM_PHI]], %[[LOAD5]]
+; CHECK-VF8UF4: for.cond.cleanup
+; CHECK-VF8UF4: %[[FADD_LCSSA:.*]] = phi float [ %[[FADD]], %for.body ], [ %[[RDX4]], %middle.block ]
+; CHECK-VF8UF4: %[[FADD_42:.*]] = fadd float %[[FADD_LCSSA]], 4.200000e+01
+; CHECK-VF8UF4: store float %[[FADD_42]], float* %b
+; CHECK-VF8UF4: for.end
+; CHECK-VF8UF4: %[[SUM_LCSSA:.*]] = phi float [ %[[FADD_LCSSA]], %for.cond.cleanup ], [ 0.000000e+00, %entry ]
+; CHECK-VF8UF4: ret float %[[SUM_LCSSA]]
 entry:
   %cmp = icmp sgt i64 %n, 0
   br i1 %cmp, label %for.body, label %for.end
@@ -103,7 +111,7 @@
   %fadd = fadd float %sum, %0
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !1
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body

 for.cond.cleanup:
   %fadd.lcssa = phi float [ %fadd, %for.body ]
@@ -116,22 +124,23 @@
   ret float %sum.lcssa
 }

+; CHECK-REMARKS: vectorized loop (vectorization width: 2, interleaved count: 2)
 define void @fadd_strict_interleave(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_strict_interleave
-; CHECK: entry
-; CHECK: %[[ARRAYIDX:.*]] = getelementptr inbounds float, float* %a, i64 1
-; CHECK: %[[LOAD1:.*]] = load float, float* %a
-; CHECK: %[[LOAD2:.*]] = load float, float* %[[ARRAYIDX]]
-; CHECK: vector.body
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ %[[LOAD2]], %vector.ph ], [ %[[RDX2:.*]], %vector.body ]
-; CHECK: %[[VEC_PHI2:.*]] = phi float [ %[[LOAD1]], %vector.ph ], [ %[[RDX1:.*]], %vector.body ]
-; CHECK: %[[WIDE_LOAD:.*]] = load <8 x float>, <8 x float>*
-; CHECK: %[[STRIDED1:.*]] = shufflevector <8 x float> %[[WIDE_LOAD]], <8 x float> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
-; CHECK: %[[STRIDED2:.*]] = shufflevector <8 x float> %[[WIDE_LOAD]], <8 x float> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
-; CHECK: %[[RDX1]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI2]], <4 x float> %[[STRIDED1]])
-; CHECK: %[[RDX2]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI1]], <4 x float> %[[STRIDED2]])
-; CHECK: for.end
-; CHECK ret void
+; CHECK-VF4UF1-LABEL: @fadd_strict_interleave
+; CHECK-VF4UF1: entry
+; CHECK-VF4UF1: %[[ARRAYIDX:.*]] = getelementptr inbounds float, float* %a, i64 1
+; CHECK-VF4UF1: %[[LOAD1:.*]] = load float, float* %a
+; CHECK-VF4UF1: %[[LOAD2:.*]] = load float, float* %[[ARRAYIDX]]
+; CHECK-VF4UF1: vector.body
+; CHECK-VF4UF1: %[[VEC_PHI1:.*]] = phi float [ %[[LOAD2]], %vector.ph ], [ %[[RDX2:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[VEC_PHI2:.*]] = phi float [ %[[LOAD1]], %vector.ph ], [ %[[RDX1:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[WIDE_LOAD:.*]] = load <8 x float>, <8 x float>*
+; CHECK-VF4UF1: %[[STRIDED1:.*]] = shufflevector <8 x float> %[[WIDE_LOAD]], <8 x float> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-VF4UF1: %[[STRIDED2:.*]] = shufflevector <8 x float> %[[WIDE_LOAD]], <8 x float> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+; CHECK-VF4UF1: %[[RDX1]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI2]], <4 x float> %[[STRIDED1]])
+; CHECK-VF4UF1: %[[RDX2]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI1]], <4 x float> %[[STRIDED2]])
+; CHECK-VF4UF1: for.end
+; CHECK-VF4UF1: ret void
 entry:
   %arrayidxa = getelementptr inbounds float, float* %a, i64 1
   %a1 = load float, float* %a, align 4
@@ -151,7 +160,7 @@
   %add2 = fadd float %1, %add.phi1
   %iv.next = add nuw nsw i64 %iv, 2
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !2
+  br i1 %exitcond.not, label %for.end, label %for.body

 for.end:
   store float %add1, float* %a, align 4
@@ -159,19 +168,20 @@
   ret void
 }

+; CHECK-REMARKS: vectorized loop (vectorization width: 4, interleaved count: 2)
 define float @fadd_invariant(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_invariant
-; CHECK: vector.body
-; CHECK: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
-; CHECK: %[[LOAD1:.*]] = load <4 x float>, <4 x float>*
-; CHECK: %[[LOAD2:.*]] = load <4 x float>, <4 x float>*
-; CHECK: %[[ADD:.*]] = fadd <4 x float> %[[LOAD1]], %[[LOAD2]]
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI1]], <4 x float> %[[ADD]])
-; CHECK: for.end.loopexit
-; CHECK: %[[EXIT_PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: for.end
-; CHECK: %[[PHI:.*]] = phi float [ 0.000000e+00, %entry ], [ %[[EXIT_PHI]], %for.end.loopexit ]
-; CHECK: ret float %[[PHI]]
+; CHECK-VF4UF1-LABEL: @fadd_invariant
+; CHECK-VF4UF1: vector.body
+; CHECK-VF4UF1: %[[VEC_PHI1:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[LOAD1:.*]] = load <4 x float>, <4 x float>*
+; CHECK-VF4UF1: %[[LOAD2:.*]] = load <4 x float>, <4 x float>*
+; CHECK-VF4UF1: %[[ADD:.*]] = fadd <4 x float> %[[LOAD1]], %[[LOAD2]]
+; CHECK-VF4UF1: %[[RDX]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[VEC_PHI1]], <4 x float> %[[ADD]])
+; CHECK-VF4UF1: for.end.loopexit
+; CHECK-VF4UF1: %[[EXIT_PHI:.*]] = phi float [ %[[SCALAR:.*]], %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-VF4UF1: for.end
+; CHECK-VF4UF1: %[[PHI:.*]] = phi float [ 0.000000e+00, %entry ], [ %[[EXIT_PHI]], %for.end.loopexit ]
+; CHECK-VF4UF1: ret float %[[PHI]]
 entry:
   %arrayidx = getelementptr inbounds float, float* %a, i64 1
   %0 = load float, float* %arrayidx, align 4
@@ -189,40 +199,41 @@
   %rdx = fadd float %res.014, %add
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !2
+  br i1 %exitcond.not, label %for.end, label %for.body

 for.end: ; preds = %for.body, %entry
   %res = phi float [ 0.000000e+00, %entry ], [ %rdx, %for.body ]
   ret float %res
 }

+; CHECK-REMARKS: the cost-model indicates that vectorization is not beneficial
 define float @fadd_conditional(float* noalias nocapture readonly %a, float* noalias nocapture readonly %b, i64 %n) {
-; CHECK-LABEL: @fadd_conditional
-; CHECK: vector.body:
-; CHECK: %[[PHI:.*]] = phi float [ 1.000000e+00, %vector.ph ], [ %[[RDX:.*]], %pred.load.continue6 ]
-; CHECK: %[[LOAD1:.*]] = load <4 x float>, <4 x float>*
-; CHECK: %[[FCMP1:.*]] = fcmp une <4 x float> %[[LOAD1]], zeroinitializer
-; CHECK: %[[EXTRACT:.*]] = extractelement <4 x i1> %[[FCMP1]], i32 0
-; CHECK: br i1 %[[EXTRACT]], label %pred.load.if, label %pred.load.continue
-; CHECK: pred.load.continue6
-; CHECK: %[[PHI1:.*]] = phi <4 x float> [ %[[PHI0:.*]], %pred.load.continue4 ], [ %[[INS_ELT:.*]], %pred.load.if5 ]
-; CHECK: %[[XOR:.*]] = xor <4 x i1> %[[FCMP1]], <i1 true, i1 true, i1 true, i1 true>
-; CHECK: %[[PRED:.*]] = select <4 x i1> %[[XOR]], <4 x float> <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>, <4 x float> %[[PHI1]]
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[PHI]], <4 x float> %[[PRED]])
-; CHECK: for.body
-; CHECK: %[[RES_PHI:.*]] = phi float [ %[[MERGE_RDX:.*]], %scalar.ph ], [ %[[FADD:.*]], %for.inc ]
-; CHECK: %[[LOAD2:.*]] = load float, float*
-; CHECK: %[[FCMP2:.*]] = fcmp une float %[[LOAD2]], 0.000000e+00
-; CHECK: br i1 %[[FCMP2]], label %if.then, label %for.inc
-; CHECK: if.then
-; CHECK: %[[LOAD3:.*]] = load float, float*
-; CHECK: br label %for.inc
-; CHECK: for.inc
-; CHECK: %[[PHI2:.*]] = phi float [ %[[LOAD3]], %if.then ], [ 3.000000e+00, %for.body ]
-; CHECK: %[[FADD]] = fadd float %[[RES_PHI]], %[[PHI2]]
-; CHECK: for.end
-; CHECK: %[[RDX_PHI:.*]] = phi float [ %[[FADD]], %for.inc ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[RDX_PHI]]
+; CHECK-VF4UF1-LABEL: @fadd_conditional
+; CHECK-VF4UF1: vector.body:
+; CHECK-VF4UF1: %[[PHI:.*]] = phi float [ 1.000000e+00, %vector.ph ], [ %[[RDX:.*]], %pred.load.continue6 ]
+; CHECK-VF4UF1: %[[LOAD1:.*]] = load <4 x float>, <4 x float>*
+; CHECK-VF4UF1: %[[FCMP1:.*]] = fcmp une <4 x float> %[[LOAD1]], zeroinitializer
+; CHECK-VF4UF1: %[[EXTRACT:.*]] = extractelement <4 x i1> %[[FCMP1]], i32 0
+; CHECK-VF4UF1: br i1 %[[EXTRACT]], label %pred.load.if, label %pred.load.continue
+; CHECK-VF4UF1: pred.load.continue6
+; CHECK-VF4UF1: %[[PHI1:.*]] = phi <4 x float> [ %[[PHI0:.*]], %pred.load.continue4 ], [ %[[INS_ELT:.*]], %pred.load.if5 ]
+; CHECK-VF4UF1: %[[XOR:.*]] = xor <4 x i1> %[[FCMP1]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-VF4UF1: %[[PRED:.*]] = select <4 x i1> %[[XOR]], <4 x float> <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>, <4 x float> %[[PHI1]]
+; CHECK-VF4UF1: %[[RDX]] = call float @llvm.vector.reduce.fadd.v4f32(float %[[PHI]], <4 x float> %[[PRED]])
+; CHECK-VF4UF1: for.body
+; CHECK-VF4UF1: %[[RES_PHI:.*]] = phi float [ %[[MERGE_RDX:.*]], %scalar.ph ], [ %[[FADD:.*]], %for.inc ]
+; CHECK-VF4UF1: %[[LOAD2:.*]] = load float, float*
+; CHECK-VF4UF1: %[[FCMP2:.*]] = fcmp une float %[[LOAD2]], 0.000000e+00
+; CHECK-VF4UF1: br i1 %[[FCMP2]], label %if.then, label %for.inc
+; CHECK-VF4UF1: if.then
+; CHECK-VF4UF1: %[[LOAD3:.*]] = load float, float*
+; CHECK-VF4UF1: br label %for.inc
+; CHECK-VF4UF1: for.inc
+; CHECK-VF4UF1: %[[PHI2:.*]] = phi float [ %[[LOAD3]], %if.then ], [ 3.000000e+00, %for.body ]
+; CHECK-VF4UF1: %[[FADD]] = fadd float %[[RES_PHI]], %[[PHI2]]
+; CHECK-VF4UF1: for.end
+; CHECK-VF4UF1: %[[RDX_PHI:.*]] = phi float [ %[[FADD]], %for.inc ], [ %[[RDX]], %middle.block ]
+; CHECK-VF4UF1: ret float %[[RDX_PHI]]
 entry:
   br label %for.body

@@ -244,7 +255,7 @@
   %fadd = fadd float %res, %phi
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !2
+  br i1 %exitcond.not, label %for.end, label %for.body

 for.end:
   %rdx = phi float [ %fadd, %for.inc ]
@@ -252,21 +263,22 @@
 }

 ; Test to check masking correct, using the "llvm.loop.vectorize.predicate.enable" attribute
+; CHECK-REMARKS: interleaved loop (interleaved count: 2)
 define float @fadd_predicated(float* noalias nocapture %a, i64 %n) {
-; CHECK-LABEL: @fadd_predicated
-; CHECK: vector.ph
-; CHECK: %[[TRIP_MINUS_ONE:.*]] = sub i64 %n, 1
-; CHECK: %[[BROADCAST_INS:.*]] = insertelement <2 x i64> poison, i64 %[[TRIP_MINUS_ONE]], i32 0
-; CHECK: %[[SPLAT:.*]] = shufflevector <2 x i64> %[[BROADCAST_INS]], <2 x i64> poison, <2 x i32> zeroinitializer
-; CHECK: vector.body
-; CHECK: %[[RDX_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %pred.load.continue2 ]
-; CHECK: pred.load.continue2
-; CHECK: %[[PHI:.*]] = phi <2 x float> [ %[[PHI0:.*]], %pred.load.continue ], [ %[[INS_ELT:.*]], %pred.load.if1 ]
-; CHECK: %[[MASK:.*]] = select <2 x i1> %0, <2 x float> %[[PHI]], <2 x float> <float -0.000000e+00, float -0.000000e+00>
-; CHECK: %[[RDX]] = call float @llvm.vector.reduce.fadd.v2f32(float %[[RDX_PHI]], <2 x float> %[[MASK]])
-; CHECK: for.end:
-; CHECK: %[[RES_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[RES_PHI]]
+; CHECK-PRED-LABEL: @fadd_predicated
+; CHECK-PRED: vector.ph
+; CHECK-PRED: %[[TRIP_MINUS_ONE:.*]] = sub i64 %n, 1
+; CHECK-PRED: %[[BROADCAST_INS:.*]] = insertelement <2 x i64> poison, i64 %[[TRIP_MINUS_ONE]], i32 0
+; CHECK-PRED: %[[SPLAT:.*]] = shufflevector <2 x i64> %[[BROADCAST_INS]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-PRED: vector.body
+; CHECK-PRED: %[[RDX_PHI:.*]] = phi float [ 0.000000e+00, %vector.ph ], [ %[[RDX:.*]], %pred.load.continue2 ]
+; CHECK-PRED: pred.load.continue2
+; CHECK-PRED: %[[PHI:.*]] = phi <2 x float> [ %[[PHI0:.*]], %pred.load.continue ], [ %[[INS_ELT:.*]], %pred.load.if1 ]
+; CHECK-PRED: %[[MASK:.*]] = select <2 x i1> %0, <2 x float> %[[PHI]], <2 x float> <float -0.000000e+00, float -0.000000e+00>
+; CHECK-PRED: %[[RDX]] = call float @llvm.vector.reduce.fadd.v2f32(float %[[RDX_PHI]], <2 x float> %[[MASK]])
+; CHECK-PRED: for.end:
+; CHECK-PRED: %[[RES_PHI:.*]] = phi float [ %[[FADD:.*]], %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-PRED: ret float %[[RES_PHI]]
 entry:
   br label %for.body

@@ -278,7 +290,7 @@
   %l7 = fadd float %sum.02, %l3
   %iv.next = add i64 %iv, 1
   %exitcond = icmp eq i64 %iv.next, %n
-  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3
+  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0

 for.end: ; preds = %for.body
   %sum.0.lcssa = phi float [ %l7, %for.body ]
@@ -286,25 +298,26 @@
 }

 ; Negative test - loop contains multiple fadds which we cannot safely reorder
+; CHECK-REMARKS: loop not vectorized: cannot prove it is safe to reorder floating-point operations
 define float @fadd_multiple(float* noalias nocapture %a, float* noalias nocapture %b, i64 %n) {
-; CHECK-LABEL: @fadd_multiple
-; CHECK: vector.body
-; CHECK: %[[PHI:.*]] = phi <8 x float> [ <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %vector.ph ], [ %[[VEC_FADD2:.*]], %vector.body ]
-; CHECK: %[[VEC_LOAD1:.*]] = load <8 x float>, <8 x float>
-; CHECK: %[[VEC_FADD1:.*]] = fadd <8 x float> %[[PHI]], %[[VEC_LOAD1]]
-; CHECK: %[[VEC_LOAD2:.*]] = load <8 x float>, <8 x float>
-; CHECK: %[[VEC_FADD2]] = fadd <8 x float> %[[VEC_FADD1]], %[[VEC_LOAD2]]
-; CHECK: middle.block
-; CHECK: %[[RDX:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> %[[VEC_FADD2]])
-; CHECK: for.body
-; CHECK: %[[SUM:.*]] = phi float [ %bc.merge.rdx, %scalar.ph ], [ %[[FADD2:.*]], %for.body ]
-; CHECK: %[[LOAD1:.*]] = load float, float*
-; CHECK: %[[FADD1:.*]] = fadd float %sum, %[[LOAD1]]
-; CHECK: %[[LOAD2:.*]] = load float, float*
-; CHECK: %[[FADD2]] = fadd float %[[FADD1]], %[[LOAD2]]
-; CHECK: for.end
-; CHECK: %[[RET:.*]] = phi float [ %[[FADD2]], %for.body ], [ %[[RDX]], %middle.block ]
-; CHECK: ret float %[[RET]]
+; CHECK-VF8UF1-LABEL: @fadd_multiple
+; CHECK-VF8UF1: vector.body
+; CHECK-VF8UF1: %[[PHI:.*]] = phi <8 x float> [ <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %vector.ph ], [ %[[VEC_FADD2:.*]], %vector.body ]
+; CHECK-VF8UF1: %[[VEC_LOAD1:.*]] = load <8 x float>, <8 x float>
+; CHECK-VF8UF1: %[[VEC_FADD1:.*]] = fadd <8 x float> %[[PHI]], %[[VEC_LOAD1]]
+; CHECK-VF8UF1: %[[VEC_LOAD2:.*]] = load <8 x float>, <8 x float>
+; CHECK-VF8UF1: %[[VEC_FADD2]] = fadd <8 x float> %[[VEC_FADD1]], %[[VEC_LOAD2]]
+; CHECK-VF8UF1: middle.block
+; CHECK-VF8UF1: %[[RDX:.*]] = call float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> %[[VEC_FADD2]])
+; CHECK-VF8UF1: for.body
+; CHECK-VF8UF1: %[[SUM:.*]] = phi float [ %bc.merge.rdx, %scalar.ph ], [ %[[FADD2:.*]], %for.body ]
+; CHECK-VF8UF1: %[[LOAD1:.*]] = load float, float*
+; CHECK-VF8UF1: %[[FADD1:.*]] = fadd float %sum, %[[LOAD1]]
+; CHECK-VF8UF1: %[[LOAD2:.*]] = load float, float*
+; CHECK-VF8UF1: %[[FADD2]] = fadd float %[[FADD1]], %[[LOAD2]]
+; CHECK-VF8UF1: for.end
+; CHECK-VF8UF1: %[[RET:.*]] = phi float [ %[[FADD2]], %for.body ], [ %[[RDX]], %middle.block ]
+; CHECK-VF8UF1: ret float %[[RET]]
 entry:
   br label %for.body

@@ -319,21 +332,12 @@
   %add3 = fadd float %add, %1
   %iv.next = add nuw nsw i64 %iv, 1
   %exitcond.not = icmp eq i64 %iv.next, %n
-  br i1 %exitcond.not, label %for.end, label %for.body, !llvm.loop !0
+  br i1 %exitcond.not, label %for.end, label %for.body

 for.end: ; preds = %for.body
   %rdx = phi float [ %add3, %for.body ]
   ret float %rdx
 }

-!0 = distinct !{!0, !4, !7, !9}
-!1 = distinct !{!1, !4, !8, !9}
-!2 = distinct !{!2, !5, !7, !9}
-!3 = distinct !{!3, !6, !7, !9, !10}
-!4 = !{!"llvm.loop.vectorize.width", i32 8}
-!5 = !{!"llvm.loop.vectorize.width", i32 4}
-!6 = !{!"llvm.loop.vectorize.width", i32 2}
-!7 = !{!"llvm.loop.interleave.count", i32 1}
-!8 = !{!"llvm.loop.interleave.count", i32 4}
-!9 = !{!"llvm.loop.vectorize.enable", i1 true}
-!10 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
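; [Editorial aside, not part of the patch] For contrast with the in-order form
; checked above: when reordering is allowed (e.g. forced hints make
; allowReordering() return true), the vectorizer keeps a wide accumulator in
; the loop and emits a single reassociating horizontal reduction afterwards,
; which is the shape the fadd_multiple checks expect. A minimal sketch of that
; final reduction (the function name is illustrative only):
;
;   define float @unordered_fadd_sketch(<8 x float> %vec.acc) {
;     ; `reassoc` permits summing the lanes in any order; -0.0 is the
;     ; identity element for fadd, so it serves as the neutral start value
;     %r = call reassoc float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> %vec.acc)
;     ret float %r
;   }
;   declare float @llvm.vector.reduce.fadd.v8f32(float, <8 x float>)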