diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -2510,6 +2510,16 @@
   /// Return a vector value that contains the vector V reversed
   Value *CreateVectorReverse(Value *V, const Twine &Name = "");
 
+  /// Return a vector splice intrinsic if using scalable vectors, otherwise
+  /// return a shufflevector. If the immediate is positive, a vector is
+  /// extracted from concat(V1, V2), starting at Imm. If the immediate
+  /// is negative, we extract -Imm elements from V1 and the remaining
+  /// elements from V2. Imm is a signed integer in the range
+  /// -VL <= Imm < VL (where VL is the runtime vector length of the
+  /// source/result vector).
+  Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
+                            const Twine &Name = "");
+
   /// Return a vector value that contains \arg V broadcasted to \p
   /// NumElts elements.
   Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -1027,6 +1027,34 @@
   return CreateShuffleVector(V, ShuffleMask, Name);
 }
 
+Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
+                                         const Twine &Name) {
+  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
+  assert(V1->getType() == V2->getType() &&
+         "Splice expects matching operand types!");
+
+  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
+    Module *M = BB->getParent()->getParent();
+    Function *F = Intrinsic::getDeclaration(
+        M, Intrinsic::experimental_vector_splice, VTy);
+
+    Value *Ops[] = {V1, V2, getInt32(Imm)};
+    return Insert(CallInst::Create(F, Ops), Name);
+  }
+
+  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
+  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
+         "Invalid immediate for vector splice!");
+
+  // Keep the original behaviour for fixed vectors.
+  unsigned Idx = (NumElts + Imm) % NumElts;
+  SmallVector<int, 8> Mask;
+  for (unsigned I = 0; I < NumElts; ++I)
+    Mask.push_back(Idx + I);
+
+  return CreateShuffleVector(V1, V2, Mask);
+}
+
 Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                         const Twine &Name) {
   auto EC = ElementCount::getFixed(NumElts);
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4173,14 +4173,18 @@
   auto *ScalarInit = Phi->getIncomingValueForBlock(Preheader);
   auto *Previous = Phi->getIncomingValueForBlock(Latch);
 
+  auto *IdxTy = Builder.getInt32Ty();
+  auto *One = ConstantInt::get(IdxTy, 1);
+
   // Create a vector from the initial value.
   auto *VectorInit = ScalarInit;
   if (VF.isVector()) {
     Builder.SetInsertPoint(LoopVectorPreHeader->getTerminator());
-    assert(!VF.isScalable() && "VF is assumed to be non scalable.");
+    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
+    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
     VectorInit = Builder.CreateInsertElement(
-        PoisonValue::get(VectorType::get(VectorInit->getType(), VF)), VectorInit,
-        Builder.getInt32(VF.getKnownMinValue() - 1), "vector.recur.init");
+        PoisonValue::get(VectorType::get(VectorInit->getType(), VF)),
+        VectorInit, LastIdx, "vector.recur.init");
   }
 
   VPValue *PhiDef = State.Plan->getVPValue(Phi);
@@ -4220,14 +4224,6 @@
   }
   Builder.SetInsertPoint(&*InsertPt);
 
-  // We will construct a vector for the recurrence by combining the values for
-  // the current and previous iterations. This is the required shuffle mask.
-  assert(!VF.isScalable());
-  SmallVector<int> ShuffleMask(VF.getKnownMinValue());
-  ShuffleMask[0] = VF.getKnownMinValue() - 1;
-  for (unsigned I = 1; I < VF.getKnownMinValue(); ++I)
-    ShuffleMask[I] = I + VF.getKnownMinValue() - 1;
-
   // The vector from which to take the initial value for the current iteration
   // (actual or unrolled). Initially, this is the vector phi node.
   Value *Incoming = VecPhi;
@@ -4236,10 +4232,9 @@
   for (unsigned Part = 0; Part < UF; ++Part) {
     Value *PreviousPart = State.get(PreviousDef, Part);
     Value *PhiPart = State.get(PhiDef, Part);
-    auto *Shuffle =
-        VF.isVector()
-            ? Builder.CreateShuffleVector(Incoming, PreviousPart, ShuffleMask)
-            : Incoming;
+    auto *Shuffle = VF.isVector()
+                        ? Builder.CreateVectorSplice(Incoming, PreviousPart, -1)
+                        : Incoming;
     PhiPart->replaceAllUsesWith(Shuffle);
     cast<Instruction>(PhiPart)->eraseFromParent();
     State.reset(PhiDef, Shuffle, Part);
@@ -4254,9 +4249,10 @@
   auto *ExtractForScalar = Incoming;
   if (VF.isVector()) {
     Builder.SetInsertPoint(LoopMiddleBlock->getTerminator());
-    ExtractForScalar = Builder.CreateExtractElement(
-        ExtractForScalar, Builder.getInt32(VF.getKnownMinValue() - 1),
-        "vector.recur.extract");
+    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
+    auto *LastIdx = Builder.CreateSub(RuntimeVF, One);
+    ExtractForScalar = Builder.CreateExtractElement(ExtractForScalar, LastIdx,
+                                                    "vector.recur.extract");
   }
   // Extract the second last element in the middle block if the
   // Phi is used outside the loop. We need to extract the phi itself
@@ -4264,15 +4260,16 @@
   // will be the value when jumping to the exit block from the LoopMiddleBlock,
   // when the scalar loop is not run at all.
   Value *ExtractForPhiUsedOutsideLoop = nullptr;
-  if (VF.isVector())
+  if (VF.isVector()) {
+    auto *RuntimeVF = getRuntimeVF(Builder, IdxTy, VF);
+    auto *Idx = Builder.CreateSub(RuntimeVF, ConstantInt::get(IdxTy, 2));
     ExtractForPhiUsedOutsideLoop = Builder.CreateExtractElement(
-        Incoming, Builder.getInt32(VF.getKnownMinValue() - 2),
-        "vector.recur.extract.for.phi");
-  // When loop is unrolled without vectorizing, initialize
-  // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value of
-  // `Incoming`. This is analogous to the vectorized case above: extracting the
-  // second last element when VF > 1.
-  else if (UF > 1)
+        Incoming, Idx, "vector.recur.extract.for.phi");
+  } else if (UF > 1)
+    // When loop is unrolled without vectorizing, initialize
+    // ExtractForPhiUsedOutsideLoop with the value just prior to unrolled value
+    // of `Incoming`. This is analogous to the vectorized case above: extracting
+    // the second last element when VF > 1.
     ExtractForPhiUsedOutsideLoop = State.get(PreviousDef, UF - 2);
 
   // Fix the initial value of the original recurrence in the scalar loop.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence.ll
@@ -0,0 +1,104 @@
+; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF1
+; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -mtriple aarch64-unknown-linux-gnu -mattr=+sve -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF2
+
+; We vectorize this first order recurrence, with a set of insertelements for
+; each unrolled part. Make sure these insertelements are generated in-order,
+; because the shuffle of the first order recurrence will be added after the
+; insertelement of the last part UF - 1, assuming the latter appears after the
+; insertelements of all other parts.
+;
+; int PR33613(double *b, double j, int d) {
+;   int a = 0;
+;   for(int i = 0; i < 10240; i++, b+=25) {
+;     double f = b[d]; // Scalarize to form insertelements
+;     if (j * f)
+;       a++;
+;     j = f;
+;   }
+;   return a;
+; }
+;
+define i32 @PR33613(double* %b, double %j, i32 %d) {
+; CHECK-VF4UF2-LABEL: @PR33613
+; CHECK-VF4UF2: vector.body
+; CHECK-VF4UF2: %[[VEC_RECUR:.*]] = phi <vscale x 4 x double> [ {{.*}}, %vector.ph ], [ {{.*}}, %vector.body ]
+; CHECK-VF4UF2: %[[SPLICE1:.*]] = call <vscale x 4 x double> @llvm.experimental.vector.splice.nxv4f64(<vscale x 4 x double> %[[VEC_RECUR]], <vscale x 4 x double> {{.*}}, i32 -1)
+; CHECK-VF4UF2-NEXT: %[[SPLICE2:.*]] = call <vscale x 4 x double> @llvm.experimental.vector.splice.nxv4f64(<vscale x 4 x double> %{{.*}}, <vscale x 4 x double> %{{.*}}, i32 -1)
+; CHECK-VF4UF2-NOT: insertelement
+; CHECK-VF4UF2: middle.block
+entry:
+  %idxprom = sext i32 %d to i64
+  br label %for.body
+
+for.cond.cleanup:
+  %a.1.lcssa = phi i32 [ %a.1, %for.body ]
+  ret i32 %a.1.lcssa
+
+for.body:
+  %b.addr.012 = phi double* [ %b, %entry ], [ %add.ptr, %for.body ]
+  %i.011 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]
+  %a.010 = phi i32 [ 0, %entry ], [ %a.1, %for.body ]
+  %j.addr.09 = phi double [ %j, %entry ], [ %0, %for.body ]
+  %arrayidx = getelementptr inbounds double, double* %b.addr.012, i64 %idxprom
+  %0 = load double, double* %arrayidx, align 8
+  %mul = fmul double %j.addr.09, %0
+  %tobool = fcmp une double %mul, 0.000000e+00
+  %inc = zext i1 %tobool to i32
+  %a.1 = add nsw i32 %a.010, %inc
+  %inc1 = add nuw nsw i32 %i.011, 1
+  %add.ptr = getelementptr inbounds double, double* %b.addr.012, i64 25
+  %exitcond = icmp eq i32 %inc1, 10240
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !0
+}
+
+; PR34711: given three consecutive instructions such that the first will be
+; widened, the second is a cast that will be widened and needs to sink after the
+; third, and the third is a first-order-recurring load that will be replicated
+; instead of widened. Although the cast and the first instruction will both be
+; widened, and are originally adjacent to each other, make sure the replicated
+; load ends up appearing between them.
+;
+; void PR34711(short[2] *a, int *b, int *c, int n) {
+;   for(int i = 0; i < n; i++) {
+;     c[i] = 7;
+;     b[i] = (a[i][0] * a[i][1]);
+;   }
+; }
+;
+; Check that the sext sank after the load in the vector loop.
+define void @PR34711([2 x i16]* %a, i32* %b, i32* %c, i64 %n) {
+; CHECK-VF4UF1-LABEL: @PR34711
+; CHECK-VF4UF1: vector.body
+; CHECK-VF4UF1: %[[VEC_RECUR:.*]] = phi <vscale x 4 x i16> [ %vector.recur.init, %vector.ph ], [ %[[MGATHER:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[MGATHER]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0i16(<vscale x 4 x i16*> {{.*}}, i32 2, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> undef, i1 true, i32 0), <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i16> undef)
+; CHECK-VF4UF1-NEXT: %[[SPLICE:.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %[[VEC_RECUR]], <vscale x 4 x i16> %[[MGATHER]], i32 -1)
+; CHECK-VF4UF1-NEXT: %[[SXT1:.*]] = sext <vscale x 4 x i16> %[[SPLICE]] to <vscale x 4 x i32>
+; CHECK-VF4UF1-NEXT: %[[SXT2:.*]] = sext <vscale x 4 x i16> %[[MGATHER]] to <vscale x 4 x i32>
+; CHECK-VF4UF1-NEXT: mul nsw <vscale x 4 x i32> %[[SXT2]], %[[SXT1]]
+entry:
+  %pre.index = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 0, i64 0
+  %.pre = load i16, i16* %pre.index
+  br label %for.body
+
+for.body:
+  %0 = phi i16 [ %.pre, %entry ], [ %1, %for.body ]
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arraycidx = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
+  %cur.index = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 %indvars.iv, i64 1
+  store i32 7, i32* %arraycidx   ; 1st instruction, to be widened.
+  %conv = sext i16 %0 to i32     ; 2nd, cast to sink after third.
+  %1 = load i16, i16* %cur.index ; 3rd, first-order-recurring load not widened.
+  %conv3 = sext i16 %1 to i32
+  %mul = mul nsw i32 %conv3, %conv
+  %arrayidx5 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
+  store i32 %mul, i32* %arrayidx5
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, %n
+  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret void
+}
+
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/scalable-first-order-recurrence.ll
@@ -0,0 +1,274 @@
+; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -force-target-supports-scalable-vectors=true -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF1
+; RUN: opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 -force-target-supports-scalable-vectors=true -S < %s | FileCheck %s --check-prefix=CHECK-VF4UF2
+
+; void recurrence_1(int *a, int *b, int n) {
+;   for(int i = 0; i < n; i++)
+;     b[i] = a[i] + a[i - 1];
+; }
+;
+define void @recurrence_1(i32* nocapture readonly %a, i32* nocapture %b, i32 %n) {
+; CHECK-VF4UF1-LABEL: @recurrence_1
+; CHECK-VF4UF1: for.preheader
+; CHECK-VF4UF1: %[[SUB_1:.*]] = add i32 %n, -1
+; CHECK-VF4UF1: %[[ZEXT:.*]] = zext i32 %[[SUB_1]] to i64
+; CHECK-VF4UF1: %[[ADD:.*]] = add nuw nsw i64 %[[ZEXT]], 1
+; CHECK-VF4UF1: vector.ph:
+; CHECK-VF4UF1: %[[VSCALE1:.*]] = call i32 @llvm.vscale.i32()
+; CHECK-VF4UF1: %[[MUL1:.*]] = mul i32 %[[VSCALE1]], 4
+; CHECK-VF4UF1: %[[SUB1:.*]] = sub i32 %[[MUL1]], 1
+; CHECK-VF4UF1: %[[VEC_RECUR_INIT:.*]] = insertelement <vscale x 4 x i32> poison, i32 %pre_load, i32 %[[SUB1]]
+; CHECK-VF4UF1: vector.body:
+; CHECK-VF4UF1: %[[INDEX:.*]] = phi i64 [ 0, %vector.ph ], [ %[[NEXT_IDX:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[VEC_RECUR:.*]] = phi <vscale x 4 x i32> [ %[[VEC_RECUR_INIT]], %vector.ph ], [ %[[LOAD:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[LOAD]] = load <vscale x 4 x i32>, <vscale x 4 x i32>*
+; CHECK-VF4UF1: %[[SPLICE:.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %[[VEC_RECUR]], <vscale x 4 x i32> %[[LOAD]], i32 -1)
+; CHECK-VF4UF1: middle.block:
+; CHECK-VF4UF1: %[[VSCALE2:.*]] = call i32 @llvm.vscale.i32()
+; CHECK-VF4UF1: %[[MUL2:.*]] = mul i32 %[[VSCALE2]], 4
+; CHECK-VF4UF1: %[[SUB2:.*]] = sub i32 %[[MUL2]], 1
+; CHECK-VF4UF1: %[[VEC_RECUR_EXT:.*]] = extractelement <vscale x 4 x i32> %[[LOAD]], i32 %[[SUB2]]
+; CHECK-VF4UF1: %[[VSCALE3:.*]] = call i32 @llvm.vscale.i32()
+; CHECK-VF4UF1: %[[MUL3:.*]] = mul i32 %[[VSCALE3]], 4
+; CHECK-VF4UF1: %[[SUB3:.*]] = sub i32 %[[MUL3]], 2
+; CHECK-VF4UF1: %[[VEC_RECUR_FOR_PHI:.*]] = extractelement <vscale x 4 x i32> %[[LOAD]], i32 %[[SUB3]]
+entry:
+  br label %for.preheader
+
+for.preheader:
+  %arrayidx.phi.trans.insert = getelementptr inbounds i32, i32* %a, i64 0
+  %pre_load = load i32, i32* %arrayidx.phi.trans.insert
+  br label %scalar.body
+
+scalar.body:
+  %0 = phi i32 [ %pre_load, %for.preheader ], [ %1, %scalar.body ]
+  %indvars.iv = phi i64 [ 0, %for.preheader ], [ %indvars.iv.next, %scalar.body ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %arrayidx32 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
+  %1 = load i32, i32* %arrayidx32
+  %arrayidx34 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
+  %add35 = add i32 %1, %0
+  store i32 %add35, i32* %arrayidx34
+  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+  %exitcond = icmp eq i32 %lftr.wideiv, %n
+  br i1 %exitcond, label %for.exit, label %scalar.body, !llvm.loop !0
+
+for.exit:
+  ret void
+}
+
+; int recurrence_2(int *a, int n) {
+;   int minmax;
+;   for (int i = 0; i < n; ++i)
+;     minmax = min(minmax, max(a[i] - a[i-1], 0));
+;   return minmax;
+; }
+;
+define i32 @recurrence_2(i32* nocapture readonly %a, i32 %n) {
+; CHECK-VF4UF1-LABEL: @recurrence_2
+; CHECK-VF4UF1: vector.ph:
+; CHECK-VF4UF1: %[[VSCALE1:.*]] = call i32 @llvm.vscale.i32()
+; CHECK-VF4UF1: %[[MUL1:.*]] = mul i32 %[[VSCALE1]], 4
+; CHECK-VF4UF1: %[[SUB1:.*]] = sub i32 %[[MUL1]], 1
+; CHECK-VF4UF1: %[[VEC_RECUR_INIT:.*]] = insertelement <vscale x 4 x i32> poison, i32 %.pre, i32 %[[SUB1]]
+; CHECK-VF4UF1: vector.body:
+; CHECK-VF4UF1: %[[VEC_RECUR:.*]] = phi <vscale x 4 x i32> [ %[[VEC_RECUR_INIT]], %vector.ph ], [ %[[LOAD:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[LOAD]] = load <vscale x 4 x i32>, <vscale x 4 x i32>*
+; CHECK-VF4UF1: %[[REVERSE:.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %[[VEC_RECUR]], <vscale x 4 x i32> %[[LOAD]], i32 -1)
+; CHECK-VF4UF1: middle.block:
+; CHECK-VF4UF1: %[[VSCALE2:.*]] = call i32 @llvm.vscale.i32()
+; CHECK-VF4UF1: %[[MUL2:.*]] = mul i32 %[[VSCALE2]], 4
+; CHECK-VF4UF1: %[[SUB2:.*]] = sub i32 %[[MUL2]], 1
+; CHECK-VF4UF1: %[[VEC_RECUR_EXT:.*]] = extractelement <vscale x 4 x i32> %[[LOAD]], i32 %[[SUB2]]
+entry:
+  %cmp27 = icmp sgt i32 %n, 0
+  br i1 %cmp27, label %for.preheader, label %for.cond.cleanup
+
+for.preheader:
+  %arrayidx2.phi.trans.insert = getelementptr inbounds i32, i32* %a, i64 -1
+  %.pre = load i32, i32* %arrayidx2.phi.trans.insert, align 4
+  br label %scalar.body
+
+for.cond.cleanup.loopexit:
+  %minmax.0.cond.lcssa = phi i32 [ %minmax.0.cond, %scalar.body ]
+  br label %for.cond.cleanup
+
+for.cond.cleanup:
+  %minmax.0.lcssa = phi i32 [ undef, %entry ], [ %minmax.0.cond.lcssa, %for.cond.cleanup.loopexit ]
+  ret i32 %minmax.0.lcssa
+
+scalar.body:
+  %0 = phi i32 [ %.pre, %for.preheader ], [ %1, %scalar.body ]
+  %indvars.iv = phi i64 [ 0, %for.preheader ], [ %indvars.iv.next, %scalar.body ]
+  %minmax.028 = phi i32 [ undef, %for.preheader ], [ %minmax.0.cond, %scalar.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %1 = load i32, i32* %arrayidx, align 4
+  %sub3 = sub nsw i32 %1, %0
+  %cmp4 = icmp sgt i32 %sub3, 0
+  %cond = select i1 %cmp4, i32 %sub3, i32 0
+  %cmp5 = icmp slt i32 %minmax.028, %cond
+  %minmax.0.cond = select i1 %cmp5, i32 %minmax.028, i32 %cond
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
+  %exitcond = icmp eq i32 %lftr.wideiv, %n
+  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %scalar.body, !llvm.loop !0
+}
+
+define void @recurrence_3(i16* nocapture readonly %a, double* nocapture %b, i32 %n, float %f, i16 %p) {
+; CHECK-VF4UF1: vector.ph:
+; CHECK-VF4UF1: %[[VSCALE1:.*]] = call i32 @llvm.vscale.i32()
+; CHECK-VF4UF1: %[[MUL1:.*]] = mul i32 %[[VSCALE1]], 4
+; CHECK-VF4UF1: %[[SUB1:.*]] = sub i32 %[[MUL1]], 1
+; CHECK-VF4UF1: %vector.recur.init = insertelement <vscale x 4 x i16> poison, i16 %0, i32 %[[SUB1]]
+; CHECK-VF4UF1: vector.body:
+; CHECK-VF4UF1: %vector.recur = phi <vscale x 4 x i16> [ %vector.recur.init, %vector.ph ], [ %[[L1:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[L1]] = load <vscale x 4 x i16>, <vscale x 4 x i16>*
+; CHECK-VF4UF1: %[[SPLICE:.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %vector.recur, <vscale x 4 x i16> %[[L1]], i32 -1)
+; Check also that the casts were not moved needlessly.
+; CHECK-VF4UF1: sitofp <vscale x 4 x i16> %[[L1]] to <vscale x 4 x double>
+; CHECK-VF4UF1: sitofp <vscale x 4 x i16> %[[SPLICE]] to <vscale x 4 x double>
+; CHECK-VF4UF1: middle.block:
+; CHECK-VF4UF1: %[[VSCALE2:.*]] = call i32 @llvm.vscale.i32()
+; CHECK-VF4UF1: %[[MUL2:.*]] = mul i32 %[[VSCALE2]], 4
+; CHECK-VF4UF1: %[[SUB2:.*]] = sub i32 %[[MUL2]], 1
+; CHECK-VF4UF1: %vector.recur.extract = extractelement <vscale x 4 x i16> %[[L1]], i32 %[[SUB2]]
+entry:
+  %0 = load i16, i16* %a, align 2
+  %conv = sitofp i16 %0 to double
+  %conv1 = fpext float %f to double
+  %conv2 = sitofp i16 %p to double
+  %mul = fmul fast double %conv2, %conv1
+  %sub = fsub fast double %conv, %mul
+  store double %sub, double* %b, align 8
+  %cmp25 = icmp sgt i32 %n, 1
+  br i1 %cmp25, label %for.preheader, label %for.end
+
+for.preheader:
+  br label %scalar.body
+
+scalar.body:
+  %1 = phi i16 [ %0, %for.preheader ], [ %2, %scalar.body ]
+  %iv = phi i64 [ %iv.next, %scalar.body ], [ 1, %for.preheader ]
+  %arrayidx5 = getelementptr inbounds i16, i16* %a, i64 %iv
+  %2 = load i16, i16* %arrayidx5, align 2
+  %conv6 = sitofp i16 %2 to double
+  %conv11 = sitofp i16 %1 to double
+  %mul12 = fmul fast double %conv11, %conv1
+  %sub13 = fsub fast double %conv6, %mul12
+  %arrayidx15 = getelementptr inbounds double, double* %b, i64 %iv
+  store double %sub13, double* %arrayidx15, align 8
+  %iv.next = add nuw nsw i64 %iv, 1
+  %lftr.wideiv = trunc i64 %iv.next to i32
+  %exitcond = icmp eq i32 %lftr.wideiv, %n
+  br i1 %exitcond, label %for.end.loopexit, label %scalar.body, !llvm.loop !0
+
+for.end.loopexit:
+  br label %for.end
+
+for.end:
+  ret void
+}
+
+define void @constant_folded_previous_value() {
+; CHECK-VF4UF2-LABEL: @constant_folded_previous_value
+; CHECK-VF4UF2: vector.body
+; CHECK-VF4UF2: %[[VECTOR_RECUR:.*]] = phi <vscale x 4 x i64> [ %vector.recur.init, %vector.ph ], [ shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> undef, i64 1, i32 0), <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer), %vector.body ]
+; CHECK-VF4UF2-NEXT: %[[SPLICE1:.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.splice.nxv4i64(<vscale x 4 x i64> %vector.recur, <vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> undef, i64 1, i32 0), <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer), i32 -1)
+; CHECK-VF4UF2: %[[SPLICE2:.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.splice.nxv4i64(<vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> undef, i64 1, i32 0), <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> undef, i64 1, i32 0), <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer), i32 -1)
+entry:
+  br label %scalar.body
+
+scalar.body:
+  %i = phi i64 [ 0, %entry ], [ %i.next, %scalar.body ]
+  %tmp2 = phi i64 [ 0, %entry ], [ %tmp3, %scalar.body ]
+  %tmp3 = add i64 0, 1
+  %i.next = add nuw nsw i64 %i, 1
+  %cond = icmp eq i64 %i.next, undef
+  br i1 %cond, label %for.end, label %scalar.body, !llvm.loop !0
+
+for.end:
+  ret void
+}
+
+; We vectorize this first order recurrence by generating two
+; extracts for the phi `val.phi` - one at the last index and
+; another at the second last index. We need these 2 extracts because
+; the first order recurrence phi is used outside the loop, so we require the phi
+; itself and not its update (addx).
+define i32 @extract_second_last_iteration(i32* %cval, i32 %x) {
+; CHECK-VF4UF2-LABEL: @extract_second_last_iteration
+; CHECK-VF4UF2: vector.ph
+; CHECK-VF4UF2: %[[SPLAT_INS1:.*]] = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+; CHECK-VF4UF2: %[[SPLAT1:.*]] = shufflevector <vscale x 4 x i32> %[[SPLAT_INS1]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-VF4UF2: %[[SPLAT_INS2:.*]] = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+; CHECK-VF4UF2: %[[SPLAT2:.*]] = shufflevector <vscale x 4 x i32> %[[SPLAT_INS2]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-VF4UF2: %[[VSCALE1:.*]] = call i32 @llvm.vscale.i32()
+; CHECK-VF4UF2: %[[MUL1:.*]] = mul i32 %[[VSCALE1]], 4
+; CHECK-VF4UF2: %[[SUB1:.*]] = sub i32 %[[MUL1]], 1
+; CHECK-VF4UF2: %[[VEC_RECUR_INIT:.*]] = insertelement <vscale x 4 x i32> poison, i32 0, i32 %[[SUB1]]
+; CHECK-VF4UF2: vector.body
+; CHECK-VF4UF2: %[[VEC_RECUR:.*]] = phi <vscale x 4 x i32> [ %[[VEC_RECUR_INIT]], %vector.ph ], [ %[[ADD2:.*]], %vector.body ]
+; CHECK-VF4UF2: %[[ADD1:.*]] = add <vscale x 4 x i32> %{{.*}}, %[[SPLAT1]]
+; CHECK-VF4UF2: %[[ADD2]] = add <vscale x 4 x i32> %{{.*}}, %[[SPLAT2]]
+; CHECK-VF4UF2: middle.block
+; CHECK-VF4UF2: %[[VSCALE2:.*]] = call i32 @llvm.vscale.i32()
+; CHECK-VF4UF2: %[[MUL2:.*]] = mul i32 %[[VSCALE2]], 4
+; CHECK-VF4UF2: %[[SUB2:.*]] = sub i32 %[[MUL2]], 1
+; CHECK-VF4UF2: %vector.recur.extract = extractelement <vscale x 4 x i32> %[[ADD2]], i32 %[[SUB2]]
+; CHECK-VF4UF2: %[[VSCALE3:.*]] = call i32 @llvm.vscale.i32()
+; CHECK-VF4UF2: %[[MUL3:.*]] = mul i32 %[[VSCALE3]], 4
+; CHECK-VF4UF2: %[[SUB3:.*]] = sub i32 %[[MUL3]], 2
+; CHECK-VF4UF2: %vector.recur.extract.for.phi = extractelement <vscale x 4 x i32> %[[ADD2]], i32 %[[SUB3]]
+entry:
+  br label %for.body
+
+for.body:
+  %inc.phi = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %val.phi = phi i32 [ 0, %entry ], [ %addx, %for.body ]
+  %inc = add i32 %inc.phi, 1
+  %bc = zext i32 %inc.phi to i64
+  %addx = add i32 %inc.phi, %x
+  %cmp = icmp eq i32 %inc.phi, 95
+  br i1 %cmp, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret i32 %val.phi
+}
+
+; void sink_after(short *a, int n, int *b) {
+;   for(int i = 0; i < n; i++)
+;     b[i] = (a[i] * a[i + 1]);
+; }
+;
+; Check that the sext sank after the load in the vector loop.
+define void @sink_after(i16* %a, i32* %b, i64 %n) {
+; CHECK-VF4UF1-LABEL: @sink_after
+; CHECK-VF4UF1: vector.body
+; CHECK-VF4UF1: %[[VEC_RECUR:.*]] = phi <vscale x 4 x i16> [ %vector.recur.init, %vector.ph ], [ %[[LOAD:.*]], %vector.body ]
+; CHECK-VF4UF1: %[[LOAD]] = load <vscale x 4 x i16>, <vscale x 4 x i16>*
+; CHECK-VF4UF1-NEXT: %[[SPLICE:.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.splice.nxv4i16(<vscale x 4 x i16> %[[VEC_RECUR]], <vscale x 4 x i16> %[[LOAD]], i32 -1)
+; CHECK-VF4UF1-NEXT: sext <vscale x 4 x i16> %[[SPLICE]] to <vscale x 4 x i32>
+; CHECK-VF4UF1-NEXT: sext <vscale x 4 x i16> %[[LOAD]] to <vscale x 4 x i32>
+entry:
+  %.pre = load i16, i16* %a
+  br label %for.body
+
+for.body:
+  %0 = phi i16 [ %.pre, %entry ], [ %1, %for.body ]
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %conv = sext i16 %0 to i32
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %arrayidx2 = getelementptr inbounds i16, i16* %a, i64 %indvars.iv.next
+  %1 = load i16, i16* %arrayidx2
+  %conv3 = sext i16 %1 to i32
+  %mul = mul nsw i32 %conv3, %conv
+  %arrayidx5 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
+  store i32 %mul, i32* %arrayidx5
+  %exitcond = icmp eq i64 %indvars.iv.next, %n
+  br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0
+
+for.end:
+  ret void
+}
+
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
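
For a quick sense of the two paths CreateVectorSplice takes, the standalone .ll sketch below is illustrative only (it is not part of the patch; the function names are invented, and it assumes the @llvm.experimental.vector.splice intrinsic exercised by the tests above). With Imm = -1 the result is the last element of the first operand followed by the leading elements of the second, which is exactly the combination the first-order-recurrence code needs.

; Fixed-width path: CreateVectorSplice(%v1, %v2, -1) lowers to a shufflevector
; whose mask starts at NumElts + Imm = 3, i.e. <3, 4, 5, 6>.
; For %v1 = <1, 2, 3, 4> and %v2 = <5, 6, 7, 8> the result is <4, 5, 6, 7>.
define <4 x i32> @splice_fixed_example(<4 x i32> %v1, <4 x i32> %v2) {
  %splice = shufflevector <4 x i32> %v1, <4 x i32> %v2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
  ret <4 x i32> %splice
}

; Scalable path: the same request becomes a call to the splice intrinsic with
; the immediate passed through unchanged.
define <vscale x 4 x i32> @splice_scalable_example(<vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2) {
  %splice = call <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, i32 -1)
  ret <vscale x 4 x i32> %splice
}

declare <vscale x 4 x i32> @llvm.experimental.vector.splice.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)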