Index: llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1000,6 +1000,90 @@
   return true;
 }
 
+static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
+  // We don't allow icmp's, and because we only look at single block loops,
+  // we simply count the icmps, i.e. there should only be 1 for the backedge.
+  if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
+    return false;
+
+  if (isa<SExtInst>(&I) ||
+      isa<ZExtInst>(&I) ||
+      isa<TruncInst>(&I)) {
+    LLVM_DEBUG(dbgs() << "not allowing instruction: "; I.dump());
+    return false;
+  }
+  return true;
+}
+
+static bool consecutiveLoadStores(SmallVectorImpl<Instruction *> &LoadStores,
+                                  ScalarEvolution &SE,
+                                  const DataLayout &DL, Loop *L,
+                                  const LoopAccessInfo *LAI) {
+  PredicatedScalarEvolution PSE = LAI->getPSE();
+  SmallSet<int64_t, 4> Strides;
+  for (auto &I : LoadStores) {
+    Value *Ptr = isa<LoadInst>(I) ? I->getOperand(0) : I->getOperand(1);
+    int64_t Stride = getPtrStride(PSE, Ptr, L);
+    Strides.insert(Stride);
+    LLVM_DEBUG(dbgs() << "Stride = " << Stride << ": "; I->dump());
+  }
+  return Strides.size() == 1 && *Strides.begin() == 1;
+}
+
+// To set up a tail-predicated loop, we need to know the total number of
+// elements processed by that loop. Thus, we need to determine the element
+// size and:
+// 1) it should be uniform for all operations in the vector loop, so we
+//    e.g. don't want any widening/narrowing operations.
+// 2) it should be smaller than 64 bits, because we don't have vector
+//    operations that work on i64.
+// 3) we don't want elements to be reversed or shuffled, to make sure the
+//    tail-predication masks/predicates the right lanes.
+//
+static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
+                                 const DataLayout &DL,
+                                 const LoopAccessInfo *LAI) {
+  int ICmpCount = 0;
+  LLVM_DEBUG(dbgs() << "tail-predication: checking allowed instructions\n");
+  SmallVector<Instruction *, 16> LoadStores;
+  for (BasicBlock *BB : L->blocks()) {
+    for (Instruction &I : BB->instructionsWithoutDebug()) {
+      if (isa<PHINode>(&I))
+        continue;
+      if (!canTailPredicateInstruction(I, ICmpCount))
+        return false;
+
+      Type *T = I.getType();
+      if (T->isPointerTy())
+        T = T->getPointerElementType();
+
+      // TODO: the float types
+      if (T->isHalfTy() || T->isFloatTy() || T->isDoubleTy())
+        return false;
+
+      if (T->getScalarSizeInBits() > 32) {
+        LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
+        return false;
+      }
+
+      // Collect all loads and stores
+      if (isa<LoadInst>(I) || isa<StoreInst>(I))
+        LoadStores.push_back(&I);
+    }
+  }
+
+  LLVM_DEBUG(dbgs() << "Number of loads/stores to analyse: "
+                    << LoadStores.size() << "\n");
+
+  if (!consecutiveLoadStores(LoadStores, SE, DL, L, LAI)) {
+    LLVM_DEBUG(dbgs() << "Different strides found, can't tail-predicate.\n");
+    return false;
+  }
+
+  LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
+  return true;
+}
+
 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
                                              ScalarEvolution &SE,
                                              AssumptionCache &AC,
@@ -1012,6 +1096,15 @@
   if (!ST->hasMVEIntegerOps())
     return false;
 
+  // For now, restrict this to single block loops.
+  if (L->getNumBlocks() > 1) {
+    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
+                         "loop.\n");
+    return false;
+  }
+
+  assert(L->empty() && "preferPredicateOverEpilogue: inner-loop expected");
+
   HardwareLoopInfo HWLoopInfo(L);
   if (!HWLoopInfo.canAnalyze(*LI)) {
     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
@@ -1033,14 +1126,7 @@
     return false;
   }
 
-  // TODO: to set up a tail-predicated loop, which works by setting up
-  // the total number of elements processed by the loop, we need to
-  // determine the element size here, and if it is uniform for all operations
-  // in the vector loop. This means we will reject narrowing/widening
-  // operations, and don't want to predicate the vector loop, which is
-  // the main prep step for tail-predicated loops.
-
-  return false;
+  return canTailPredicateLoop(L, LI, SE, DL, LAI);
 }
Index: llvm/test/Transforms/LoopVectorize/ARM/prefer-tail-loop-folding.ll
===================================================================
--- llvm/test/Transforms/LoopVectorize/ARM/prefer-tail-loop-folding.ll
+++ llvm/test/Transforms/LoopVectorize/ARM/prefer-tail-loop-folding.ll
@@ -16,18 +16,298 @@
 ; RUN: opt -mtriple=thumbv8.1m.main-arm-eabihf -mattr=+mve < %s -loop-vectorize -enable-arm-maskedldst=true -S | \
 ; RUN:   FileCheck %s -check-prefixes=CHECK,PREFER-FOLDING
 
-define dso_local void @tail_folding(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) {
-; CHECK-LABEL: tail_folding(
+define void @prefer_folding(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) #0 {
+; CHECK-LABEL: prefer_folding(
+; PREFER-FOLDING: vector.body:
+; PREFER-FOLDING: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32
+; PREFER-FOLDING: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32
+; PREFER-FOLDING: call void @llvm.masked.store.v4i32.p0v4i32
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
 ;
-; NO-FOLDING-NOT: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
-; NO-FOLDING-NOT: call void @llvm.masked.store.v4i32.p0v4i32(
-;
-; TODO: this needs implementation of TTI::preferPredicateOverEpilogue,
-; then this will be tail-folded too:
+; NO-FOLDING-NOT: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
+; NO-FOLDING-NOT: call void @llvm.masked.store.v4i32.p0v4i32(
+; NO-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %for.body
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.09 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %B, i32 %i.09
+  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %C, i32 %i.09
+  %1 = load i32, i32* %arrayidx1, align 4
+  %add = add nsw i32 %1, %0
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %i.09
+  store i32 %add, i32* %arrayidx2, align 4
+  %add3 = add nuw nsw i32 %i.09, 1
+  %exitcond = icmp eq i32 %add3, 431
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+define void @mixed_types(i16* noalias nocapture %A, i16* noalias nocapture readonly %B, i16* noalias nocapture readonly %C, i32* noalias nocapture %D, i32* noalias nocapture readonly %E, i32* noalias nocapture readonly %F) #0 {
+; CHECK-LABEL: mixed_types(
+; PREFER-FOLDING: vector.body:
+; PREFER-FOLDING: call <4 x i16> @llvm.masked.load.v4i16.p0v4i16
+; PREFER-FOLDING: call <4 x i16> @llvm.masked.load.v4i16.p0v4i16
+; PREFER-FOLDING: call void @llvm.masked.store.v4i16.p0v4i16
+; PREFER-FOLDING: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32
+; PREFER-FOLDING: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32
+; PREFER-FOLDING: call void @llvm.masked.store.v4i32.p0v4i32
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.018 = phi i32 [ 0, %entry ], [ %add9, %for.body ]
+  %arrayidx = getelementptr inbounds i16, i16* %B, i32 %i.018
+  %0 = load i16, i16* %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds i16, i16* %C, i32 %i.018
+  %1 = load i16, i16* %arrayidx1, align 2
+  %add = add i16 %1, %0
+  %arrayidx4 = getelementptr inbounds i16, i16* %A, i32 %i.018
+  store i16 %add, i16* %arrayidx4, align 2
+  %arrayidx5 = getelementptr inbounds i32, i32* %E, i32 %i.018
+  %2 = load i32, i32* %arrayidx5, align 4
+  %arrayidx6 = getelementptr inbounds i32, i32* %F, i32 %i.018
+  %3 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 %3, %2
+  %arrayidx8 = getelementptr inbounds i32, i32* %D, i32 %i.018
+  store i32 %add7, i32* %arrayidx8, align 4
+  %add9 = add nuw nsw i32 %i.018, 1
+  %exitcond = icmp eq i32 %add9, 431
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+define void @unsupported_i64_type(i64* noalias nocapture %A, i64* noalias nocapture readonly %B, i64* noalias nocapture readonly %C) #0 {
+; CHECK-LABEL: unsupported_i64_type(
+; PREFER-FOLDING-NOT: vector.body:
+; PREFER-FOLDING-NOT: llvm.masked.load
+; PREFER-FOLDING-NOT: llvm.masked.store
+; PREFER-FOLDING: for.body:
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.09 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+  %arrayidx = getelementptr inbounds i64, i64* %B, i32 %i.09
+  %0 = load i64, i64* %arrayidx, align 8
+  %arrayidx1 = getelementptr inbounds i64, i64* %C, i32 %i.09
+  %1 = load i64, i64* %arrayidx1, align 8
+  %add = add nsw i64 %1, %0
+  %arrayidx2 = getelementptr inbounds i64, i64* %A, i32 %i.09
+  store i64 %add, i64* %arrayidx2, align 8
+  %add3 = add nuw nsw i32 %i.09, 1
+  %exitcond = icmp eq i32 %add3, 431
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+define void @zext_not_allowed(i32* noalias nocapture %A, i8* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) #0 {
+; CHECK-LABEL: zext_not_allowed(
+; PREFER-FOLDING: vector.body:
+; PREFER-FOLDING-NOT: llvm.masked.load
+; PREFER-FOLDING-NOT: llvm.masked.store
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.09 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+  %arrayidx = getelementptr inbounds i8, i8* %B, i32 %i.09
+  %0 = load i8, i8* %arrayidx, align 1
+  %conv = zext i8 %0 to i32
+  %arrayidx1 = getelementptr inbounds i32, i32* %C, i32 %i.09
+  %1 = load i32, i32* %arrayidx1, align 4
+  %add = add nsw i32 %1, %conv
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %i.09
+  store i32 %add, i32* %arrayidx2, align 4
+  %add3 = add nuw nsw i32 %i.09, 1
+  %exitcond = icmp eq i32 %add3, 431
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+define void @sext_not_allowed(i32* noalias nocapture %A, i8* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) #0 {
+; CHECK-LABEL: sext_not_allowed(
+; PREFER-FOLDING: vector.body:
+; PREFER-FOLDING-NOT: llvm.masked.load
+; PREFER-FOLDING-NOT: llvm.masked.store
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.09 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+  %arrayidx = getelementptr inbounds i8, i8* %B, i32 %i.09
+  %0 = load i8, i8* %arrayidx, align 1
+  %conv = sext i8 %0 to i32
+  %arrayidx1 = getelementptr inbounds i32, i32* %C, i32 %i.09
+  %1 = load i32, i32* %arrayidx1, align 4
+  %add = add nsw i32 %1, %conv
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %i.09
+  store i32 %add, i32* %arrayidx2, align 4
+  %add3 = add nuw nsw i32 %i.09, 1
+  %exitcond = icmp eq i32 %add3, 431
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+define void @trunc_not_allowed(i8* noalias nocapture %A, i8* noalias nocapture readonly %B, i16* noalias nocapture readonly %C) #0 {
+; CHECK-LABEL: trunc_not_allowed(
+; PREFER-FOLDING: vector.body:
+; PREFER-FOLDING-NOT: llvm.masked.load
+; PREFER-FOLDING-NOT: llvm.masked.store
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
+entry:
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body
+  ret void
+
+for.body:                                         ; preds = %for.body, %entry
+  %i.012 = phi i32 [ 0, %entry ], [ %add6, %for.body ]
+  %arrayidx = getelementptr inbounds i16, i16* %C, i32 %i.012
+  %0 = load i16, i16* %arrayidx, align 2
+  %arrayidx1 = getelementptr inbounds i8, i8* %B, i32 %i.012
+  %1 = load i8, i8* %arrayidx1, align 1
+  %conv3 = trunc i16 %0 to i8
+  %add = add i8 %1, %conv3
+  %arrayidx5 = getelementptr inbounds i8, i8* %A, i32 %i.012
+  store i8 %add, i8* %arrayidx5, align 1
+  %add6 = add nuw nsw i32 %i.012, 1
+  %exitcond = icmp eq i32 %add6, 431
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+}
+
+@tab = common global [32 x i8] zeroinitializer, align 1
+
+define i32 @icmp_not_allowed() #0 {
+; CHECK-LABEL: icmp_not_allowed(
+; PREFER-FOLDING: vector.body:
+; PREFER-FOLDING-NOT: llvm.masked.load
+; PREFER-FOLDING-NOT: llvm.masked.store
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
+entry:
+  br label %for.body
+
+for.body:
+  %i.08 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %arrayidx = getelementptr inbounds [32 x i8], [32 x i8]* @tab, i32 0, i32 %i.08
+  %0 = load i8, i8* %arrayidx, align 1
+  %cmp1 = icmp eq i8 %0, 0
+  %. = select i1 %cmp1, i8 2, i8 1
+  store i8 %., i8* %arrayidx, align 1
+  %inc = add nsw i32 %i.08, 1
+  %exitcond = icmp slt i32 %inc, 1000
+  br i1 %exitcond, label %for.body, label %for.end
+
+for.end:
+  ret i32 0
+}
+
+define void @pragma_vect_predicate_disable(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) #0 {
+; CHECK-LABEL: pragma_vect_predicate_disable(
 ;
-; PREFER-FOLDING-NOT: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(
-; PREFER-FOLDING-NOT: call void @llvm.masked.store.v4i32.p0v4i32(
+; FIXME:
+; respect loop hint predicate.enable = false, and don't tail-fold here:
 ;
+; PREFER-FOLDING: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32
+; PREFER-FOLDING: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32
+; PREFER-FOLDING: call void @llvm.masked.store.v4i32.p0v4i32
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.09 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %B, i32 %i.09
+  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %C, i32 %i.09
+  %1 = load i32, i32* %arrayidx1, align 4
+  %add = add nsw i32 %1, %0
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %i.09
+  store i32 %add, i32* %arrayidx2, align 4
+  %add3 = add nuw nsw i32 %i.09, 1
+  %exitcond = icmp eq i32 %add3, 431
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !7
+}
+
+; Test directions for array indices i and N-i, i.e. check strides 1 and -1, and
+; force vectorisation with a loop hint.
+define void @strides_different_direction(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C, i32 %N) #0 {
+; CHECK-LABEL: strides_different_direction(
+; PREFER-FOLDING: vector.body:
+; PREFER-FOLDING-NOT: llvm.masked.load
+; PREFER-FOLDING-NOT: llvm.masked.store
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.09 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %B, i32 %i.09
+  %0 = load i32, i32* %arrayidx, align 4
+  %sub = sub nsw i32 %N, %i.09
+  %arrayidx1 = getelementptr inbounds i32, i32* %C, i32 %sub
+  %1 = load i32, i32* %arrayidx1, align 4
+  %add = add nsw i32 %1, %0
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %i.09
+  store i32 %add, i32* %arrayidx2, align 4
+  %add3 = add nuw nsw i32 %i.09, 1
+  %exitcond = icmp eq i32 %add3, 431
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !10
+}
+
+define dso_local void @stride_4(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) local_unnamed_addr #0 {
+; CHECK-LABEL: stride_4(
+; PREFER-FOLDING: vector.body:
+; PREFER-FOLDING-NOT: llvm.masked.load
+; PREFER-FOLDING-NOT: llvm.masked.store
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.09 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+  %arrayidx = getelementptr inbounds i32, i32* %B, i32 %i.09
+  %0 = load i32, i32* %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %C, i32 %i.09
+  %1 = load i32, i32* %arrayidx1, align 4
+  %add = add nsw i32 %1, %0
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %i.09
+  store i32 %add, i32* %arrayidx2, align 4
+  %add3 = add nuw nsw i32 %i.09, 4
+  %cmp = icmp ult i32 %add3, 731
+  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !5
+}
+
+define void @too_many_loop_blocks(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i32* noalias nocapture readonly %C) #0 {
+; CHECK-LABEL: too_many_loop_blocks(
+; PREFER-FOLDING: vector.body:
+; PREFER-FOLDING-NOT: llvm.masked.load
+; PREFER-FOLDING-NOT: llvm.masked.store
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
 entry:
   br label %for.body
 
@@ -35,15 +315,56 @@
   ret void
 
 for.body:
-  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-  %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
+  %i.09 = phi i32 [ 0, %entry ], [ %add3, %loopincr ]
+  %arrayidx = getelementptr inbounds i32, i32* %B, i32 %i.09
   %0 = load i32, i32* %arrayidx, align 4
-  %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
-  %1 = load i32, i32* %arrayidx2, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %C, i32 %i.09
+  %1 = load i32, i32* %arrayidx1, align 4
   %add = add nsw i32 %1, %0
-  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
-  store i32 %add, i32* %arrayidx4, align 4
-  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-  %exitcond = icmp eq i64 %indvars.iv.next, 430
+  %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %i.09
+  store i32 %add, i32* %arrayidx2, align 4
+  br label %loopincr
+
+loopincr:
+  %add3 = add nuw nsw i32 %i.09, 1
+  %exitcond = icmp eq i32 %add3, 431
   br i1 %exitcond, label %for.cond.cleanup, label %for.body
 }
+
+; TODO: the float types
+define void @float(float* noalias nocapture %A, float* noalias nocapture readonly %B, float* noalias nocapture readonly %C) #0 {
+; CHECK-LABEL: float(
+; PREFER-FOLDING: vector.body:
+; PREFER-FOLDING-NOT: llvm.masked.load
+; PREFER-FOLDING-NOT: llvm.masked.store
+; PREFER-FOLDING: br i1 %{{.*}}, label %{{.*}}, label %vector.body
+entry:
+  br label %for.body
+
+for.cond.cleanup:
+  ret void
+
+for.body:
+  %i.09 = phi i32 [ 0, %entry ], [ %add3, %for.body ]
+  %arrayidx = getelementptr inbounds float, float* %B, i32 %i.09
+  %0 = load float, float* %arrayidx, align 4
+  %arrayidx1 = getelementptr inbounds float, float* %C, i32 %i.09
+  %1 = load float, float* %arrayidx1, align 4
+  %add = fadd fast float %1, %0
+  %arrayidx2 = getelementptr inbounds float, float* %A, i32 %i.09
+  store float %add, float* %arrayidx2, align 4
+  %add3 = add nuw nsw i32 %i.09, 1
+  %exitcond = icmp eq i32 %add3, 431
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !10
+}
+
+attributes #0 = { nofree norecurse nounwind "target-features"="+armv8.1-m.main,+mve.fp" }
+
+!5 = distinct !{!5, !6}
+!6 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+!7 = distinct !{!7, !8}
+!8 = !{!"llvm.loop.vectorize.predicate.enable", i1 false}
+
+!10 = distinct !{!10, !11}
+!11 = !{!"llvm.loop.vectorize.width", i32 4}