Index: llvm/lib/Target/ARM/MVETailPredication.cpp =================================================================== --- llvm/lib/Target/ARM/MVETailPredication.cpp +++ llvm/lib/Target/ARM/MVETailPredication.cpp @@ -85,7 +85,6 @@ bool runOnLoop(Loop *L, LPPassManager&) override; private: - /// Perform the relevant checks on the loop and convert if possible. bool TryConvert(Value *TripCount); @@ -94,12 +93,15 @@ bool IsPredicatedVectorLoop(); /// Compute a value for the total number of elements that the predicated - /// loop will process. - Value *ComputeElements(Value *TripCount, VectorType *VecTy); + /// loop will process if it is a runtime value. + Value *ComputeRuntimeElements(Instruction *Predicate, Value *TripCount, + VectorType *VecTy, Instruction **Shuffle, + Instruction **Induction); /// Is the icmp that generates an i1 vector, based upon a loop counter /// and a limit that is defined outside the loop. - bool isTailPredicate(Instruction *Predicate, Value *NumElements); + bool isTailPredicate(Instruction *Predicate, Value *NumElements, + Instruction *Shuffle, Instruction *Induction); /// Insert the intrinsic to represent the effect of tail predication. void InsertVCTPIntrinsic(Instruction *Predicate, @@ -213,6 +215,7 @@ if (!Decrement) return false; + ClonedVCTPInExitBlock = false; LLVM_DEBUG(dbgs() << "ARM TP: Running on Loop: " << *L << *Setup << "\n" << *Decrement << "\n"); @@ -225,51 +228,31 @@ return false; } -bool MVETailPredication::isTailPredicate(Instruction *I, Value *NumElements) { - // Look for the following: - - // %trip.count.minus.1 = add i32 %N, -1 - // %broadcast.splatinsert10 = insertelement <4 x i32> undef, - // i32 %trip.count.minus.1, i32 0 - // %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10, - // <4 x i32> undef, - // <4 x i32> zeroinitializer - // ... - // ... 
- // %index = phi i32 - // %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0 - // %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, - // <4 x i32> undef, - // <4 x i32> zeroinitializer - // %induction = add <4 x i32> %broadcast.splat, - // %pred = icmp ule <4 x i32> %induction, %broadcast.splat11 - - // And return whether V == %pred. - +// Pattern match the loop iteration count setup: +// +// %trip.count.minus.1 = add i32 %N, -1 +// %broadcast.splatinsert10 = insertelement <4 x i32> undef, +// i32 %trip.count.minus.1, i32 0 +// %broadcast.splat11 = shufflevector <4 x i32> %broadcast.splatinsert10, +// <4 x i32> undef, +// <4 x i32> zeroinitializer +// .. +// vector.body: +// .. +// +static bool MatchElemCountLoopSetup(Loop *L, Instruction *Shuffle, + Value *NumElements) { using namespace PatternMatch; - - CmpInst::Predicate Pred; - Instruction *Shuffle = nullptr; - Instruction *Induction = nullptr; - - // The vector icmp - if (!match(I, m_ICmp(Pred, m_Instruction(Induction), - m_Instruction(Shuffle))) || - Pred != ICmpInst::ICMP_ULE) - return false; - - // First find the stuff outside the loop which is setting up the limit - // vector.... - // The invariant shuffle that broadcast the limit into a vector. Instruction *Insert = nullptr; - if (!match(Shuffle, m_ShuffleVector(m_Instruction(Insert), m_Undef(), - m_Zero()))) + + if (!match(Shuffle, + m_ShuffleVector(m_Instruction(Insert), m_Undef(), m_Zero()))) return false; // Insert the limit into a vector. Instruction *BECount = nullptr; - if (!match(Insert, m_InsertElement(m_Undef(), m_Instruction(BECount), - m_Zero()))) + if (!match(Insert, + m_InsertElement(m_Undef(), m_Instruction(BECount), m_Zero()))) return false; // The limit calculation, backedge count. @@ -280,13 +263,35 @@ if (TripCount != NumElements || !L->isLoopInvariant(BECount)) return false; - // Now back to searching inside the loop body... 
-  // Find the add with takes the index iv and adds a constant vector to it.
+  return true;
+}
+
+// Pattern match predicates/masks and determine if they use the loop induction
+// variable to control the number of elements processed by the loop. If so,
+// the loop is a candidate for tail-predication.
+bool MVETailPredication::isTailPredicate(Instruction *ICmp, Value *NumElements,
+                                         Instruction *Shuffle,
+                                         Instruction *Induction) {
+  using namespace PatternMatch;
+
+  // Now pattern match the loop body, and find the add with takes the index iv
+  // and adds a constant vector to it:
+  //
+  // vector.body:
+  //   ..
+  //   %index = phi i32
+  //   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  //   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert,
+  //                                    <4 x i32> undef,
+  //                                    <4 x i32> zeroinitializer
+  //   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  //   %pred = icmp ule <4 x i32> %induction, %broadcast.splat11
+
   Instruction *BroadcastSplat = nullptr;
   Constant *Const = nullptr;
-  if (!match(Induction, m_Add(m_Instruction(BroadcastSplat),
-                              m_Constant(Const))))
-    return false;
+  if (!match(Induction,
+             m_Add(m_Instruction(BroadcastSplat), m_Constant(Const))))
+    return false;
 
   // Check that we're adding <0, 1, 2, 3...
   if (auto *CDS = dyn_cast<ConstantDataSequential>(Const)) {
@@ -297,9 +302,10 @@
   } else
     return false;
 
+  Instruction *Insert = nullptr;
   // The shuffle which broadcasts the index iv into a vector.
-  if (!match(BroadcastSplat, m_ShuffleVector(m_Instruction(Insert), m_Undef(),
-                                             m_Zero())))
+  if (!match(BroadcastSplat,
+             m_ShuffleVector(m_Instruction(Insert), m_Undef(), m_Zero())))
     return false;
 
   // The insert element which initialises a vector with the index iv.
@@ -327,7 +333,7 @@
   return LHS == Phi;
 }
 
-static VectorType* getVectorType(IntrinsicInst *I) {
+static VectorType *getVectorType(IntrinsicInst *I) {
   unsigned TypeOp = I->getIntrinsicID() == Intrinsic::masked_load ?
                    0 : 1;
   auto *PtrTy = cast<PointerType>(I->getOperand(TypeOp)->getType());
   return cast<VectorType>(PtrTy->getElementType());
 }
@@ -361,8 +367,60 @@
   return !MaskedInsts.empty();
 }
 
-Value* MVETailPredication::ComputeElements(Value *TripCount,
-                                           VectorType *VecTy) {
+// Pattern match the predicate, which is an icmp with a constant vector of this
+// form:
+//
+//   icmp ult <4 x i32> %induction, <i32 32002, i32 32002, i32 32002, i32 32002>
+//
+// and return the constant, i.e. 32002 in this example. This is assumed to be
+// the scalar loop iteration count: the number of loop elements processed by
+// the vector loop. Further checks are performed in function isTailPredicate(),
+// to verify 'induction' behaves as an induction variable.
+//
+static Value *ComputeConstElements(Instruction *Predicate, Value *TripCount,
+                                   VectorType *VecTy, Instruction **Induction) {
+  ConstantInt *TC = dyn_cast<ConstantInt>(TripCount);
+  if (!TC)
+    return nullptr;
+
+  ConstantInt *VF = ConstantInt::get(cast<IntegerType>(TripCount->getType()),
+                                     VecTy->getNumElements());
+  using namespace PatternMatch;
+  CmpInst::Predicate CC;
+
+  if (!match(Predicate,
+             m_ICmp(CC, m_Instruction(*Induction), m_AnyIntegralConstant())) ||
+      CC != ICmpInst::ICMP_ULT)
+    return nullptr;
+
+  LLVM_DEBUG(dbgs() << "ARM TP: icmp with constants: "; Predicate->dump(););
+  Value *ConstVec = Predicate->getOperand(1);
+
+  auto *CDS = dyn_cast<ConstantDataVector>(ConstVec);
+  if (!CDS || CDS->getNumElements() != VF->getSExtValue())
+    return nullptr;
+
+  Constant *Const = CDS->getSplatValue();
+  if (Const) {
+    LLVM_DEBUG(dbgs() << "ARM TP: Found const elem count: " << *Const << "\n");
+
+    // If the TripCount is a multiple of the vectorisation factor VF, then
+    // the vectoriser should not have produced masked loads/stores, i.e., we
+    // don't need tail-predication and we should not get here, so assert that.
+    assert(dyn_cast<ConstantInt>(Const)->getSExtValue() % VF->getSExtValue() !=
+               0 &&
+           "tail-predication: trip count should not be a multiple of the VF");
+  }
+  return Const;
+}
+
+Value *MVETailPredication::ComputeRuntimeElements(Instruction *Predicate,
+                                                  Value *TripCount,
+                                                  VectorType *VecTy,
+                                                  Instruction **Shuffle,
+                                                  Instruction **Induction) {
+
+  using namespace PatternMatch;
   const SCEV *TripCountSE = SE->getSCEV(TripCount);
   ConstantInt *VF = ConstantInt::get(cast<IntegerType>(TripCount->getType()),
                                      VecTy->getNumElements());
@@ -370,8 +428,18 @@
   if (VF->equalsInt(1))
     return nullptr;
 
-  // TODO: Support constant trip counts.
-  auto VisitAdd = [&](const SCEVAddExpr *S) -> const SCEVMulExpr* {
+  CmpInst::Predicate Pred;
+  if (!match(Predicate, m_ICmp(Pred, m_Instruction(*Induction),
+                               m_Instruction(*Shuffle))) ||
+      Pred != ICmpInst::ICMP_ULE)
+    return nullptr;
+
+  LLVM_DEBUG(dbgs() << "Computing number of elements for vector trip count: ";
+             TripCount->dump());
+
+  // Otherwise, continue and try to pattern match the vector iteration
+  // count expression
+  auto VisitAdd = [&](const SCEVAddExpr *S) -> const SCEVMulExpr * {
     if (auto *Const = dyn_cast<SCEVConstant>(S->getOperand(0))) {
       if (Const->getAPInt() != -VF->getValue())
         return nullptr;
@@ -380,7 +448,7 @@
     return dyn_cast<SCEVMulExpr>(S->getOperand(1));
   };
 
-  auto VisitMul = [&](const SCEVMulExpr *S) -> const SCEVUDivExpr* {
+  auto VisitMul = [&](const SCEVMulExpr *S) -> const SCEVUDivExpr * {
     if (auto *Const = dyn_cast<SCEVConstant>(S->getOperand(0))) {
       if (Const->getValue() != VF)
         return nullptr;
@@ -389,7 +457,7 @@
     return dyn_cast<SCEVUDivExpr>(S->getOperand(1));
   };
 
-  auto VisitDiv = [&](const SCEVUDivExpr *S) -> const SCEV* {
+  auto VisitDiv = [&](const SCEVUDivExpr *S) -> const SCEV * {
     if (auto *Const = dyn_cast<SCEVConstant>(S->getRHS())) {
       if (Const->getValue() != VF)
        return nullptr;
@@ -545,7 +613,7 @@
 
 bool MVETailPredication::TryConvert(Value *TripCount) {
   if (!IsPredicatedVectorLoop()) {
-    LLVM_DEBUG(dbgs() << "ARM TP: no masked instructions in loop");
+    LLVM_DEBUG(dbgs() << "ARM TP: no masked
instructions in loop.\n"); return false; } @@ -564,21 +632,37 @@ continue; VectorType *VecTy = getVectorType(I); - Value *NumElements = ComputeElements(TripCount, VecTy); - if (!NumElements) + Instruction *Shuffle = nullptr; + Instruction *Induction = nullptr; + Value *NumElements = nullptr; + + auto FoundPredicate = [&]() { + LLVM_DEBUG(dbgs() << "ARM TP: Found tail predicate: " << *Predicate + << "\n"); + Predicates.insert(Predicate); + InsertVCTPIntrinsic(Predicate, NewPredicates, VecTy, NumElements); + }; + + if ((NumElements = + ComputeConstElements(Predicate, TripCount, VecTy, &Induction)) && + isTailPredicate(Predicate, NumElements, Shuffle, Induction)) { + FoundPredicate(); continue; + } - if (!isTailPredicate(Predicate, NumElements)) { - LLVM_DEBUG(dbgs() << "ARM TP: Not tail predicate: " << *Predicate << "\n"); + if ((NumElements = ComputeRuntimeElements(Predicate, TripCount, VecTy, + &Shuffle, &Induction)) && + MatchElemCountLoopSetup(L, Shuffle, NumElements) && + isTailPredicate(Predicate, NumElements, Shuffle, Induction)) { + FoundPredicate(); continue; } - - LLVM_DEBUG(dbgs() << "ARM TP: Found tail predicate: " << *Predicate << "\n"); - Predicates.insert(Predicate); - - InsertVCTPIntrinsic(Predicate, NewPredicates, VecTy, NumElements); + LLVM_DEBUG(dbgs() << "ARM TP: Not tail predicate: " << *Predicate << "\n"); } + if (!NewPredicates.size()) + return false; + // Now clean up. 
ClonedVCTPInExitBlock = Cleanup(NewPredicates, Predicates, L); return true; Index: llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-const.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/Thumb2/LowOverheadLoops/tail-pred-const.ll @@ -0,0 +1,329 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -disable-mve-tail-predication=false -mattr=+mve %s -S -o - | FileCheck %s + +define dso_local void @foo(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 { +; CHECK-LABEL: @foo( +; CHECK-NEXT: entry: +; CHECK-NEXT: call void @llvm.set.loop.iterations.i32(i32 8001) +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ] +; CHECK-NEXT: [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 8001, [[ENTRY]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ 32002, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>* +; CHECK-NEXT: [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>* +; CHECK-NEXT: [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>* +; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i1> @llvm.arm.mve.vctp32(i32 [[TMP1]]) +; CHECK-NEXT: [[TMP3]] = sub i32 [[TMP1]], 4 +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP2]], <4 x i32> undef) +; CHECK-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 
x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP2]], <4 x i32> undef) +; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP4]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP2]]) +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4 +; CHECK-NEXT: [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4 +; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4 +; CHECK-NEXT: [[TMP5]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[TMP0]], i32 1) +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0 +; CHECK-NEXT: br i1 [[TMP6]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void +; +entry: + call void @llvm.set.loop.iterations.i32(i32 8001) + br label %vector.body + +vector.body: + %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ] + %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ] + %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ] + %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ] + %0 = phi i32 [ 8001, %entry ], [ %3, %vector.body ] + %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>* + %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>* + %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>* + %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0 + %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer + %induction = add <4 x i32> %broadcast.splat, + %1 = icmp ult <4 x i32> %induction, + %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef) + %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef) + %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load + call void 
@llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1) + %index.next = add i32 %index, 4 + %scevgep = getelementptr i32, i32* %lsr.iv, i32 4 + %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4 + %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4 + %3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1) + %4 = icmp ne i32 %3, 0 + br i1 %4, label %vector.body, label %for.cond.cleanup + +for.cond.cleanup: + ret void +} + +; Silly test case: the loop count is constant and a multiple of the vectorisation +; factor. So, the vectoriser should not produce masked loads/stores and there's +; nothing to tail-predicate here, just checking. +define dso_local void @foo2(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 { +; CHECK-LABEL: @foo2( +; CHECK-NEXT: entry: +; CHECK-NEXT: call void @llvm.set.loop.iterations.i32(i32 2000) +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ] +; CHECK-NEXT: [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 2000, [[ENTRY]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>* +; CHECK-NEXT: [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>* +; CHECK-NEXT: [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>* +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[LSR_IV10]], align 4 +; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x i32>, <4 x i32>* [[LSR_IV1113]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD9]], [[WIDE_LOAD]] +; CHECK-NEXT: store <4 x i32> [[TMP1]], <4 x 
i32>* [[LSR_IV1416]], align 4 +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4 +; CHECK-NEXT: [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4 +; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4 +; CHECK-NEXT: [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[TMP0]], i32 1) +; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 +; CHECK-NEXT: br i1 [[TMP3]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void +; +entry: + call void @llvm.set.loop.iterations.i32(i32 2000) + br label %vector.body + +vector.body: + %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ] + %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ] + %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ] + %0 = phi i32 [ 2000, %entry ], [ %2, %vector.body ] + %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>* + %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>* + %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>* + %wide.load = load <4 x i32>, <4 x i32>* %lsr.iv10, align 4 + %wide.load9 = load <4 x i32>, <4 x i32>* %lsr.iv1113, align 4 + %1 = add nsw <4 x i32> %wide.load9, %wide.load + store <4 x i32> %1, <4 x i32>* %lsr.iv1416, align 4 + %scevgep = getelementptr i32, i32* %lsr.iv, i32 4 + %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4 + %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4 + %2 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1) + %3 = icmp ne i32 %2, 0 + br i1 %3, label %vector.body, label %for.cond.cleanup + +for.cond.cleanup: + ret void +} + +; Check that the icmp is a ult +define dso_local void @foo3(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 { +; CHECK-LABEL: @foo3( +; CHECK-NEXT: entry: +; CHECK-NEXT: call void @llvm.set.loop.iterations.i32(i32 8001) +; CHECK-NEXT: br label 
[[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ] +; CHECK-NEXT: [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 8001, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>* +; CHECK-NEXT: [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>* +; CHECK-NEXT: [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>* +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer +; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], +; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt <4 x i32> [[INDUCTION]], +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP2]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP1]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4 +; CHECK-NEXT: [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4 +; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4 +; CHECK-NEXT: [[TMP3]] = call i32 
@llvm.loop.decrement.reg.i32.i32.i32(i32 [[TMP0]], i32 1) +; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 +; CHECK-NEXT: br i1 [[TMP4]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void +; +entry: + call void @llvm.set.loop.iterations.i32(i32 8001) + br label %vector.body + +vector.body: + %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ] + %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ] + %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ] + %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ] + %0 = phi i32 [ 8001, %entry ], [ %3, %vector.body ] + %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>* + %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>* + %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>* + %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0 + %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer + %induction = add <4 x i32> %broadcast.splat, + +; UGT here: + %1 = icmp ugt <4 x i32> %induction, + %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef) + %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef) + %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1) + %index.next = add i32 %index, 4 + %scevgep = getelementptr i32, i32* %lsr.iv, i32 4 + %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4 + %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4 + %3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1) + %4 = icmp ne i32 %3, 0 + br i1 %4, label %vector.body, label %for.cond.cleanup + +for.cond.cleanup: + ret void +} + +; Check that this loop behaves as expected, i.e, that the loop increment is 
+; an increment and not a decrement. +define dso_local void @foo4(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 { +; CHECK-LABEL: @foo4( +; CHECK-NEXT: entry: +; CHECK-NEXT: call void @llvm.set.loop.iterations.i32(i32 8001) +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ] +; CHECK-NEXT: [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 8001, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>* +; CHECK-NEXT: [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>* +; CHECK-NEXT: [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>* +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer +; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i32> [[INDUCTION]], +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> 
[[TMP2]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP1]]) +; CHECK-NEXT: [[INDEX_NEXT]] = sub i32 [[INDEX]], 4 +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4 +; CHECK-NEXT: [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4 +; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4 +; CHECK-NEXT: [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[TMP0]], i32 1) +; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 +; CHECK-NEXT: br i1 [[TMP4]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void +; +entry: + call void @llvm.set.loop.iterations.i32(i32 8001) + br label %vector.body + +vector.body: + %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ] + %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ] + %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ] + %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ] + %0 = phi i32 [ 8001, %entry ], [ %3, %vector.body ] + %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>* + %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>* + %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>* + %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0 + %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer + %induction = add <4 x i32> %broadcast.splat, + %1 = icmp ult <4 x i32> %induction, + %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef) + %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, <4 x i1> %1, <4 x i32> undef) + %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1) + +; Counting down: + %index.next = sub i32 %index, 4 + %scevgep = getelementptr i32, i32* %lsr.iv, i32 
4 + %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4 + %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4 + %3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1) + %4 = icmp ne i32 %3, 0 + br i1 %4, label %vector.body, label %for.cond.cleanup + +for.cond.cleanup: + ret void +} + +define dso_local void @foo5(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32* noalias nocapture readnone %D, i32 %N) local_unnamed_addr #0 { +; CHECK-LABEL: @foo5( +; CHECK-NEXT: entry: +; CHECK-NEXT: call void @llvm.set.loop.iterations.i32(i32 8001) +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[LSR_IV14:%.*]] = phi i32* [ [[SCEVGEP15:%.*]], [[VECTOR_BODY]] ], [ [[A:%.*]], [[ENTRY:%.*]] ] +; CHECK-NEXT: [[LSR_IV11:%.*]] = phi i32* [ [[SCEVGEP12:%.*]], [[VECTOR_BODY]] ], [ [[C:%.*]], [[ENTRY]] ] +; CHECK-NEXT: [[LSR_IV:%.*]] = phi i32* [ [[SCEVGEP:%.*]], [[VECTOR_BODY]] ], [ [[B:%.*]], [[ENTRY]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 8001, [[ENTRY]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[LSR_IV1416:%.*]] = bitcast i32* [[LSR_IV14]] to <4 x i32>* +; CHECK-NEXT: [[LSR_IV1113:%.*]] = bitcast i32* [[LSR_IV11]] to <4 x i32>* +; CHECK-NEXT: [[LSR_IV10:%.*]] = bitcast i32* [[LSR_IV]] to <4 x i32>* +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0 +; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> undef, <4 x i32> zeroinitializer +; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT]], +; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i32> [[INDUCTION]], +; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV10]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[WIDE_MASKED_LOAD9:%.*]] = call <4 x i32> 
@llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[LSR_IV1113]], i32 4, <4 x i1> [[TMP1]], <4 x i32> undef) +; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_LOAD9]], [[WIDE_MASKED_LOAD]] +; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP2]], <4 x i32>* [[LSR_IV1416]], i32 4, <4 x i1> [[TMP1]]) +; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4 +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i32, i32* [[LSR_IV]], i32 4 +; CHECK-NEXT: [[SCEVGEP12]] = getelementptr i32, i32* [[LSR_IV11]], i32 4 +; CHECK-NEXT: [[SCEVGEP15]] = getelementptr i32, i32* [[LSR_IV14]], i32 4 +; CHECK-NEXT: [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 [[TMP0]], i32 1) +; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 +; CHECK-NEXT: br i1 [[TMP4]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void +; +entry: + call void @llvm.set.loop.iterations.i32(i32 8001) + br label %vector.body + +vector.body: + %lsr.iv14 = phi i32* [ %scevgep15, %vector.body ], [ %A, %entry ] + %lsr.iv11 = phi i32* [ %scevgep12, %vector.body ], [ %C, %entry ] + %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %B, %entry ] + %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ] + %0 = phi i32 [ 8001, %entry ], [ %3, %vector.body ] + %lsr.iv1416 = bitcast i32* %lsr.iv14 to <4 x i32>* + %lsr.iv1113 = bitcast i32* %lsr.iv11 to <4 x i32>* + %lsr.iv10 = bitcast i32* %lsr.iv to <4 x i32>* + %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0 + %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer + %induction = add <4 x i32> %broadcast.splat, + +; non-uniform constant vector here: + %1 = icmp ult <4 x i32> %induction, + %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv10, i32 4, <4 x i1> %1, <4 x i32> undef) + %wide.masked.load9 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1113, i32 4, 
<4 x i1> %1, <4 x i32> undef) + %2 = add nsw <4 x i32> %wide.masked.load9, %wide.masked.load + call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %2, <4 x i32>* %lsr.iv1416, i32 4, <4 x i1> %1) + %index.next = add i32 %index, 4 + %scevgep = getelementptr i32, i32* %lsr.iv, i32 4 + %scevgep12 = getelementptr i32, i32* %lsr.iv11, i32 4 + %scevgep15 = getelementptr i32, i32* %lsr.iv14, i32 4 + %3 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %0, i32 1) + %4 = icmp ne i32 %3, 0 + br i1 %4, label %vector.body, label %for.cond.cleanup + +for.cond.cleanup: + ret void +} + +declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>) #1 +declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) #2 +declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 , i32 ) +declare void @llvm.set.loop.iterations.i32(i32)