diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1263,6 +1263,11 @@
   /// are the selected vectorization factor and the cost of the selected VF.
   unsigned selectInterleaveCount(ElementCount VF, unsigned LoopCost);
 
+  /// \return true when the loop contains a predicated instruction that requires
+  /// scalarization.
+  bool loopHasScalarWithPredication(ElementCount VF,
+                                    bool StateFoldTailByMasking);
+
   /// Memory access instruction may be vectorized in more than one way.
   /// Form of instruction after vectorization depends on cost.
   /// This function takes cost-based decisions for Load/Store instructions
@@ -5597,6 +5602,15 @@
     return ElementCount::getScalable(0);
   }
 
+  // Scalarization of scalable vectors is not supported, so a loop containing
+  // an instruction that must be scalarized under predication cannot use a
+  // scalable VF; report it and fall back to fixed-width vectorization.
+  if (loopHasScalarWithPredication(MaxScalableVF, isScalarEpilogueAllowed())) {
+    reportVectorizationInfo(
+        "Scalable vectorization not supported for predicated "
+        "operations found in this loop. Using fixed-width "
+        "vectorization instead.",
+        "ScalableVFUnfeasible", ORE, TheLoop);
+    return ElementCount::getScalable(0);
+  }
+
   if (Legal->isSafeForAnyVectorWidth())
     return MaxScalableVF;
 
@@ -7749,6 +7766,18 @@
   return false;
 }
 
+bool LoopVectorizationCostModel::loopHasScalarWithPredication(
+    ElementCount VF, bool StateFoldTailByMasking) {
+  for (BasicBlock *BB : TheLoop->blocks()) {
+    // For each instruction in the old loop.
+    for (Instruction &I : BB->instructionsWithoutDebug()) {
+      if (!Legal->canVectorizeWithPredication(&I, StateFoldTailByMasking))
+        return true;
+    }
+  }
+  return false;
+}
+
 void LoopVectorizationCostModel::collectValuesToIgnore() {
   // Ignore ephemeral values.
   CodeMetrics::collectEphemeralValues(TheLoop, AC, ValuesToIgnore);
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-predicate-instruction.ll b/llvm/test/Transforms/LoopVectorize/scalable-predicate-instruction.ll
new file
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/scalable-predicate-instruction.ll
@@ -0,0 +1,81 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -loop-vectorize -force-target-supports-scalable-vectors -S | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+; This checks the following C++ function:
+; for (long long i = 0; i < n; i++) {
+;   if (cond[i])
+;     a[i] /= b[i];
+; }
+
+; The division should prevent the loop from being vectorized.
+; It can only be vectorized when division by zero is guaranteed not to happen.
+
+define void @predicate_instruction_with_scalableVF(i32* %a, i32* %b, i32* %cond, i64 %n) #0 {
+; CHECK-LABEL: @predicate_instruction_with_scalableVF(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP8:%.*]] = icmp sgt i64 [[N:%.*]], 0
+; CHECK-NEXT:    br i1 [[CMP8]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK:       for.body.preheader:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.cond.cleanup.loopexit:
+; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+; CHECK:       for.body:
+; CHECK-NEXT:    [[I_09:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[COND:%.*]], i64 [[I_09]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TOBOOL_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
+; CHECK-NEXT:    br i1 [[TOBOOL_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[I_09]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[I_09]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[DIV:%.*]] = sdiv i32 [[TMP2]], [[TMP1]]
+; CHECK-NEXT:    store i32 [[DIV]], i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    br label [[FOR_INC]]
+; CHECK:       for.inc:
+; CHECK-NEXT:    [[INC]] = add nuw nsw i64 [[I_09]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+;
+entry:
+  %cmp8 = icmp sgt i64 %n, 0
+  br i1 %cmp8, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.inc, %entry
+  ret void
+
+for.body:                                         ; preds = %entry, %for.inc
+  %i.09 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
+  %arrayidx = getelementptr inbounds i32, i32* %cond, i64 %i.09
+  %0 = load i32, i32* %arrayidx, align 4
+  %tobool.not = icmp eq i32 %0, 0
+  br i1 %tobool.not, label %for.inc, label %if.then
+
+if.then:                                          ; preds = %for.body
+  %arrayidx1 = getelementptr inbounds i32, i32* %b, i64 %i.09
+  %1 = load i32, i32* %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %i.09
+  %2 = load i32, i32* %arrayidx2, align 4
+  %div = sdiv i32 %2, %1
+  store i32 %div, i32* %arrayidx2, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body, %if.then
+  %inc = add nuw nsw i64 %i.09, 1
+  %exitcond.not = icmp eq i64 %inc, %n
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !0
+}
+
+attributes #0 = {"target-cpu"="generic"}
+
+
+!0 = distinct !{!0, !1, !2, !3, !4}
+!1 = !{!"llvm.loop.vectorize.width", i32 4}
+!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
+!3 = !{!"llvm.loop.interleave.count", i32 1}
+!4 = !{!"llvm.loop.vectorize.enable", i1 true}