Index: lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- lib/Target/ARM/ARMInstrMVE.td
+++ lib/Target/ARM/ARMInstrMVE.td
@@ -3986,6 +3986,7 @@
 def MVE_VDWDUPu16 : MVE_VxWDUP<"vdwdup", "u16", 0b01, 0b1>;
 def MVE_VDWDUPu32 : MVE_VxWDUP<"vdwdup", "u32", 0b10, 0b1>;
 
+let hasSideEffects = 1 in
 class MVE_VCTP<string suffix, bits<2> size, list<dag> pattern=[]>
   : MVE_p<(outs VCCR:$P0), (ins rGPR:$Rn), NoItinerary, "vctp", suffix,
           "$Rn", vpred_n, "", pattern> {
Index: lib/Target/ARM/MVETailPredication.cpp
===================================================================
--- lib/Target/ARM/MVETailPredication.cpp
+++ lib/Target/ARM/MVETailPredication.cpp
@@ -401,6 +401,7 @@
   Module *M = L->getHeader()->getModule();
   Type *Ty = IntegerType::get(M->getContext(), 32);
   SmallPtrSet<Value*, 4> Predicates;
+  DenseMap<Instruction*, Instruction*> NewPredicates;
 
   for (auto *I : MaskedInsts) {
     Intrinsic::ID ID = I->getIntrinsicID();
@@ -445,6 +446,8 @@
     Value *Remaining = Builder.CreateSub(Processed, Factor);
     Value *TailPredicate = Builder.CreateCall(VCTP, Remaining);
     Predicate->replaceAllUsesWith(TailPredicate);
+    NewPredicates[cast<Instruction>(Predicate)] =
+      cast<Instruction>(TailPredicate);
 
     // Add the incoming value to the new phi.
     Processed->addIncoming(Remaining, L->getLoopLatch());
@@ -453,6 +456,54 @@
                << "TP: Inserted VCTP: " << *TailPredicate << "\n");
   }
 
+  // Now clean up.
+  SetVector<Instruction*> MaybeDead;
+  for (auto *Pred : Predicates)
+    MaybeDead.insert(cast<Instruction>(Pred));
+
+  // Look through the exit block to see whether there's a duplicate predicate
+  // instruction. This can happen when we need to perform a select on values
+  // from the last and previous iterations. Instead of doing a straight
+  // replacement of that predicate with the vctp, clone the vctp and place it
+  // in the block. This means that the VPR doesn't have to be live into the
+  // exit block, which should make it easier to convert this loop into a
+  // proper tail-predicated loop.
+  if (BasicBlock *Exit = L->getUniqueExitBlock()) {
+    for (auto &Pair : NewPredicates) {
+      Instruction *OldPred = Pair.first;
+      Instruction *NewPred = Pair.second;
+
+      for (auto &I : *Exit) {
+        if (I.isSameOperationAs(OldPred)) {
+          Instruction *PredClone = NewPred->clone();
+          PredClone->insertBefore(&I);
+          I.replaceAllUsesWith(PredClone);
+          MaybeDead.insert(&I);
+          break;
+        }
+      }
+    }
+  }
+
+  // Drop references and add the operands to the worklist so that they are
+  // also checked for deadness.
+  SmallPtrSet<Instruction*, 4> Dead;
+  while (!MaybeDead.empty()) {
+    auto *I = MaybeDead.front();
+    MaybeDead.remove(I);
+    if (I->hasNUsesOrMore(1))
+      continue;
+
+    for (auto &U : I->operands()) {
+      if (auto *OpI = dyn_cast<Instruction>(U))
+        MaybeDead.insert(OpI);
+    }
+    I->dropAllReferences();
+    Dead.insert(I);
+  }
+
+  for (auto *I : Dead)
+    I->eraseFromParent();
+
   for (auto I : L->blocks())
     DeleteDeadPHIs(I);
Index: test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-codegen.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-codegen.ll
@@ -0,0 +1,59 @@
+; RUN: llc -mtriple=armv8.1m.main -mattr=+mve -enable-arm-maskedldst=true -disable-mve-tail-predication=false %s -o - | FileCheck %s
+
+; CHECK-LABEL: mul_reduce_add
+; CHECK: dls lr,
+; CHECK: [[LOOP:.LBB[0-9_]+]]:
+; CHECK: sub{{.*}} [[ELEMS:r[0-9]+]], #4
+; CHECK: vctp.32 [[ELEMS]]
+; CHECK: vpstt
+; CHECK-NEXT: vldrwt.u32
+; CHECK-NEXT: vldrwt.u32
+; CHECK: le lr, [[LOOP]]
+; CHECK: vctp.32 [[ELEMS]]
+; CHECK: vpsel
+; CHECK: vaddv.u32 r0
+define dso_local i32 @mul_reduce_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32 %N) {
+entry:
+  %cmp8 = icmp eq i32 %N, 0
+  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %n.rnd.up = add i32 %N, 3
+  %n.vec = and i32 %n.rnd.up, -4
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert11 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat12 = shufflevector <4 x i32> %broadcast.splatinsert11, <4 x i32> undef, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %6, %vector.body ]
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %0 = getelementptr inbounds i32, i32* %a, i32 %index
+  %1 = icmp ule <4 x i32> %induction, %broadcast.splat12
+  %2 = bitcast i32* %0 to <4 x i32>*
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %2, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %3 = getelementptr inbounds i32, i32* %b, i32 %index
+  %4 = bitcast i32* %3 to <4 x i32>*
+  %wide.masked.load13 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %4, i32 4, <4 x i1> %1, <4 x i32> undef)
+  %5 = mul nsw <4 x i32> %wide.masked.load13, %wide.masked.load
+  %6 = add nsw <4 x i32> %5, %vec.phi
+  %index.next = add i32 %index, 4
+  %7 = icmp eq i32 %index.next, %n.vec
+  br i1 %7, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %8 = select <4 x i1> %1, <4 x i32> %6, <4 x i32> %vec.phi
+  %9 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %8)
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %middle.block, %entry
+  %res.0.lcssa = phi i32 [ 0, %entry ], [ %9, %middle.block ]
+  ret i32 %res.0.lcssa
+}
+
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+
Index: test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Thumb2/LowOverheadLoops/vector-reduce-mve-tail.ll
@@ -0,0 +1,75 @@
+
+; RUN: opt -mtriple=thumbv8.1m.main -mve-tail-predication -disable-mve-tail-predication=false -mattr=+mve %s -S -o - | FileCheck %s
+
+; CHECK-LABEL: vec_mul_reduce_add
+
+; CHECK: vector.body:
+; CHECK-NOT: phi i32 [ 0, %vector.ph ]
+; CHECK: [[ELTS:%[^ ]+]] = phi i32 [ %N, %vector.ph ], [ [[SUB:%[^ ]+]], %vector.body ]
+; CHECK: [[SUB]] = sub i32 [[ELTS]], 4
+; CHECK: [[VCTP:%[^ ]+]] = call <4 x i1> @llvm.arm.vctp32(i32 [[SUB]])
+; CHECK: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]]
+; CHECK: call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* {{.*}}, i32 4, <4 x i1> [[VCTP]],
+
+; CHECK: middle.block:
+; CHECK: [[VCTP_CLONE:%[^ ]+]] = call <4 x i1> @llvm.arm.vctp32(i32 [[SUB]])
+; CHECK: [[VPSEL:%[^ ]+]] = select <4 x i1> [[VCTP_CLONE]],
+; CHECK: call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> [[VPSEL]])
+
+define i32 @vec_mul_reduce_add(i32* noalias nocapture readonly %a, i32* noalias nocapture readonly %b, i32 %N) {
+entry:
+  %cmp8 = icmp eq i32 %N, 0
+  %0 = add i32 %N, 3
+  %1 = lshr i32 %0, 2
+  %2 = shl nuw i32 %1, 2
+  %3 = add i32 %2, -4
+  %4 = lshr i32 %3, 2
+  %5 = add nuw nsw i32 %4, 1
+  br i1 %cmp8, label %for.cond.cleanup, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %trip.count.minus.1 = add i32 %N, -1
+  %broadcast.splatinsert11 = insertelement <4 x i32> undef, i32 %trip.count.minus.1, i32 0
+  %broadcast.splat12 = shufflevector <4 x i32> %broadcast.splatinsert11, <4 x i32> undef, <4 x i32> zeroinitializer
+  call void @llvm.set.loop.iterations.i32(i32 %5)
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %lsr.iv2 = phi i32* [ %scevgep3, %vector.body ], [ %a, %vector.ph ]
+  %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %b, %vector.ph ]
+  %vec.phi = phi <4 x i32> [ zeroinitializer, %vector.ph ], [ %9, %vector.body ]
+  %6 = phi i32 [ %5, %vector.ph ], [ %10, %vector.body ]
+  %lsr.iv24 = bitcast i32* %lsr.iv2 to <4 x i32>*
+  %lsr.iv1 = bitcast i32* %lsr.iv to <4 x i32>*
+  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+  %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+  %7 = icmp ule <4 x i32> %induction, %broadcast.splat12
+  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv24, i32 4, <4 x i1> %7, <4 x i32> undef)
+  %wide.masked.load13 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %lsr.iv1, i32 4, <4 x i1> %7, <4 x i32> undef)
+  %8 = mul nsw <4 x i32> %wide.masked.load13, %wide.masked.load
+  %9 = add nsw <4 x i32> %8, %vec.phi
+  %index.next = add i32 %index, 4
+  %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
+  %scevgep3 = getelementptr i32, i32* %lsr.iv2, i32 4
+  %10 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
+  %11 = icmp ne i32 %10, 0
+  br i1 %11, label %vector.body, label %middle.block
+
+middle.block:                                     ; preds = %vector.body
+  %12 = icmp ule <4 x i32> %induction, %broadcast.splat12
+  %13 = select <4 x i1> %12, <4 x i32> %9, <4 x i32> %vec.phi
+  %14 = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %13)
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %middle.block, %entry
+  %res.0.lcssa = phi i32 [ 0, %entry ], [ %14, %middle.block ]
+  ret i32 %res.0.lcssa
+}
+
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32 immarg, <4 x i1>, <4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare void @llvm.set.loop.iterations.i32(i32)
+declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
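
A note on the intrinsic this patch works with: VCTP (Vector Create Tail
Predicate) takes the number of elements still to be processed and activates
exactly that many of the low lanes, so feeding it the decrementing element
counter makes the final iteration mask off the lanes past the end of the data.
Below is a minimal scalar model of the 32-bit form, for illustration only; the
function name and shape are ours, not an LLVM or ACLE API.

  // Scalar model of VCTP.32: lane i of the predicate is active iff
  // i < Elements. vctp32(3) yields {1, 1, 1, 0}, while any Elements >= 4
  // yields an all-true mask, so full iterations are unaffected.
  #include <array>
  #include <cstdint>

  std::array<bool, 4> vctp32(uint32_t Elements) {
    std::array<bool, 4> Mask{};
    for (uint32_t Lane = 0; Lane < 4; ++Lane)
      Mask[Lane] = Lane < Elements;
    return Mask;
  }

This is also the rationale for cloning the vctp into the exit block in place
of the duplicated icmp-ule predicate there: both compute "lane index is within
the remaining trip count" for the final iteration, as the middle.block CHECK
lines in vector-reduce-mve-tail.ll verify.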