Index: llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1609,14 +1609,32 @@
         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
         return false;
       }
-
       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
         int64_t NextStride = getPtrStride(PSE, Ptr, L);
+        if (NextStride == 0) {
+          LLVM_DEBUG(dbgs() << "Bad stride found, can't "
+                               "tail-predicate.\n");
+          return false;
+        }
+        if (NextStride == 2 && isa<LoadInst>(I)) {
+          LLVM_DEBUG(dbgs() << "Consecutive strides of 2 found, vld2 can't "
+                               "be tail-predicated.\n");
+          return false;
+        }
+        if (EnableMaskedGatherScatters) {
+          if (NextStride > 0) {
+            continue;
+          } else {
+            LLVM_DEBUG(dbgs() << "Negative stride found, this can't "
+                                 "be tail-predicated.\n");
+            return false;
+          }
+        }
         // TODO: for now only allow consecutive strides of 1. We could support
         // other strides as long as it is uniform, but let's keep it simple for
         // now.
-        if (Stride == 0 && NextStride == 1) {
+        if (NextStride == 1) {
          Stride = NextStride;
          continue;
        }
Index: llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
@@ -0,0 +1,174 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -loop-vectorize -force-vector-width=4 -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedgatscat -tail-predication=force-enabled -S %s -o - | FileCheck %s
+
+define void @test_stride1_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+; CHECK-LABEL: @test_stride1_4i32(
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT2]], <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = mul nuw nsw i32 [[TMP0]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = add nuw nsw i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <4 x i32> [[BROADCAST_SPLAT:%.*]], i32 0
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[TMP0]], i32 [[TMP4]])
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i32 0
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP6]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef)
+; CHECK-NEXT:    [[TMP7:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[WIDE_MASKED_LOAD]]
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP0]]
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 0
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP7]], <4 x i32>* [[TMP10]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT:    br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
+;
+entry:
+  br label %for.body
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %mul = mul nuw nsw i32 %i.023, 1
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end:                                              ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride-1_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+; CHECK-LABEL: @test_stride-1_4i32(
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP9:%.*]] = mul nuw nsw i32 [[TMP8]], -1
+; CHECK-NEXT:    [[TMP10:%.*]] = add nuw nsw i32 [[TMP9]], 2
+; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i32 [[TMP10]]
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i32 0
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP12]], i32 -3
+; CHECK-NEXT:    [[TMP14:%.*]] = bitcast i32* [[TMP13]] to <4 x i32>*
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP14]], align 4
+; CHECK-NEXT:    [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT:    [[TMP15:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[REVERSE]]
+; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP8]]
+; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP16]], i32 0
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i32* [[TMP17]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP15]], <4 x i32>* [[TMP18]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT:    br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !4
+;
+entry:
+  br label %for.body
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %mul = mul nuw nsw i32 %i.023, -1
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end:                                              ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride2_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+;
+; CHECK-LABEL: @test_stride2_4i32(
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = mul nuw nsw i32 [[TMP2]], 2
+; CHECK-NEXT:    [[TMP4:%.*]] = add nuw nsw i32 [[TMP3]], 2
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i32 [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <8 x i32>*
+; CHECK-NEXT:    [[WIDE_VEC:%.*]] = load <8 x i32>, <8 x i32>* [[TMP7]], align 4
+; CHECK-NEXT:    [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT:    [[TMP8:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[STRIDED_VEC]]
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP2]]
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
+; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !6
+;
+entry:
+  br label %for.body
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %mul = mul nuw nsw i32 %i.023, 2
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end:                                              ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride3_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+; CHECK-LABEL: @test_stride3_4i32(
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[INDEX]], 1
+; CHECK-NEXT:    [[TMP2:%.*]] = add i32 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 3
+; CHECK-NEXT:    [[TMP4:%.*]] = mul nuw nsw <4 x i32> [[VEC_IND]], <i32 3, i32 3, i32 3, i32 3>
+; CHECK-NEXT:    [[TMP5:%.*]] = add nuw nsw <4 x i32> [[TMP4]], <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], <4 x i32> [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <4 x i32> [[BROADCAST_SPLAT:%.*]], i32 0
+; CHECK-NEXT:    [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[TMP0]], i32 [[TMP7]])
+; CHECK-NEXT:    [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP6]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef)
+; CHECK-NEXT:    [[TMP8:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[WIDE_MASKED_GATHER]]
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP0]]
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
+; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
+; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !8
+;
+entry:
+  br label %for.body
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %mul = mul nuw nsw i32 %i.023, 3
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end:                                              ; preds = %end, %entry
+  ret void
+}
+
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)
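
For reference, a minimal C sketch (not part of the patch) of the scalar loop that each @test_strideN_4i32 function above encodes. STRIDE stands in for the constants 1, -1, 2 and 3 used by the four IR functions, and the IR assumes n > 0 since it branches straight into the loop body:

  /* Hypothetical C rendering of the test loops, assuming STRIDE is one of   */
  /* 1, -1, 2 or 3. With -tail-predication=force-enabled, the new checks in  */
  /* canTailPredicateLoop keep stride 1, and any positive stride once        */
  /* -enable-arm-maskedgatscat is on (stride 3 is lowered as a masked        */
  /* gather), but bail out on an unknown stride, a negative stride, or a     */
  /* stride-2 load (vld2); those cases fall back to unpredicated vector      */
  /* code, as the CHECK lines show.                                          */
  #define STRIDE 3

  void test_stride_4i32(const int *data, int *restrict dst, int n) {
    for (int i = 0; i < n; i++)
      dst[i] = 5 + data[i * STRIDE + 2];
  }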