diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -143,7 +143,7 @@
     cl::desc("Maximum size of ALL constants to promote into a constant pool"),
     cl::init(128));
 
-static cl::opt<unsigned>
+cl::opt<unsigned>
 MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
     cl::desc("Maximum interleave factor for MVE VLDn to generate."),
     cl::init(2));
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -52,6 +52,8 @@
 
 extern cl::opt<bool> EnableMaskedGatherScatters;
 
+extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
+
 /// Convert a vector load intrinsic into a simple llvm load instruction.
 /// This is beneficial when the underlying object being addressed comes
 /// from a constant, since we get constant-folding for free.
@@ -1643,7 +1645,6 @@
   PredicatedScalarEvolution PSE = LAI->getPSE();
   SmallVector<Instruction *, 16> LoadStores;
   int ICmpCount = 0;
-  int Stride = 0;
 
   for (BasicBlock *BB : L->blocks()) {
     for (Instruction &I : BB->instructionsWithoutDebug()) {
@@ -1662,22 +1663,38 @@
         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
         return false;
       }
-
       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
         int64_t NextStride = getPtrStride(PSE, Ptr, L);
-        // TODO: for now only allow consecutive strides of 1. We could support
-        // other strides as long as it is uniform, but let's keep it simple for
-        // now.
-        if (Stride == 0 && NextStride == 1) {
-          Stride = NextStride;
+        if (NextStride == 1) {
+          // TODO: for now only allow consecutive strides of 1. We could support
+          // other strides as long as it is uniform, but let's keep it simple
+          // for now.
           continue;
-        }
-        if (Stride != NextStride) {
-          LLVM_DEBUG(dbgs() << "Different strides found, can't "
-                               "tail-predicate\n.");
+        } else if (NextStride == -1 ||
+                   (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
+                   (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
+          LLVM_DEBUG(dbgs()
+                     << "Consecutive strides of 2 found, vld2/vstr2 can't "
+                        "be tail-predicated\n.");
           return false;
+          // TODO: don't tail predicate if there is a reversed load?
+        } else if (EnableMaskedGatherScatters) {
+          // Gather/scatters do allow loading from arbitrary strides, at
+          // least if they are loop invariant.
+          // TODO: Loop variant strides should in theory work, too, but
+          // this requires further testing.
+          const SCEV *PtrScev =
+              replaceSymbolicStrideSCEV(PSE, llvm::ValueToValueMap(), Ptr);
+          if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
+            const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
+            if (PSE.getSE()->isLoopInvariant(Step, L))
+              continue;
+          }
+        }
+        LLVM_DEBUG(dbgs() << "Bad stride found, can't "
+                             "tail-predicate\n.");
+        return false;
       }
     }
   }
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
@@ -0,0 +1,391 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -loop-vectorize -force-vector-width=4 -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedgatscat -tail-predication=force-enabled -S %s -o - | FileCheck %s
+
+define void @test_stride1_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+; CHECK-LABEL: @test_stride1_4i32(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1:%.*]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT2]], <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw nsw i32 [[TMP0]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i32 [[TMP1]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i32 [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[BROADCAST_SPLAT:%.*]], i32 0
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[TMP0]], i32 [[TMP4]])
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP6]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef)
+; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[WIDE_MASKED_LOAD]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP0]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i32 0
+; CHECK-NEXT: [[TMP10:%.*]] = bitcast i32* [[TMP9]] to <4 x i32>*
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP7]], <4 x i32>* [[TMP10]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
+;
+entry:
+  br label %for.body
+for.body: ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %mul = mul nuw nsw i32 %i.023, 1
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end: ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride-1_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+; CHECK-LABEL: @test_stride-1_4i32(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT: [[TMP9:%.*]] = mul nuw nsw i32 [[TMP8]], -1
+; CHECK-NEXT: [[TMP10:%.*]] = add nuw nsw i32 [[TMP9]], 2
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i32 [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i32 0
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, i32* [[TMP12]], i32 -3
+; CHECK-NEXT: [[TMP14:%.*]] = bitcast i32* [[TMP13]] to <4 x i32>*
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP14]], align 4
+; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i32> [[WIDE_LOAD]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[TMP15:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[REVERSE]]
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP8]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP16]], i32 0
+; CHECK-NEXT: [[TMP18:%.*]] = bitcast i32* [[TMP17]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP15]], <4 x i32>* [[TMP18]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !4
+;
+entry:
+  br label %for.body
+for.body: ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %mul = mul nuw nsw i32 %i.023, -1
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end: ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride2_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+;
+; CHECK-LABEL: @test_stride2_4i32(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw nsw i32 [[TMP2]], 2
+; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i32 [[TMP3]], 2
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i32 [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <8 x i32>*
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, <8 x i32>* [[TMP7]], align 4
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+; CHECK-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[STRIDED_VEC]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !6
+;
+entry:
+  br label %for.body
+for.body: ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %mul = mul nuw nsw i32 %i.023, 2
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end: ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride3_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+; CHECK-LABEL: @test_stride3_4i32(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw nsw <4 x i32> [[VEC_IND]], <i32 3, i32 3, i32 3, i32 3>
+; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw <4 x i32> [[TMP4]], <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], <4 x i32> [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[BROADCAST_SPLAT:%.*]], i32 0
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[TMP0]], i32 [[TMP7]])
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP6]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef)
+; CHECK-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[WIDE_MASKED_GATHER]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP0]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !8
+;
+entry:
+  br label %for.body
+for.body: ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %mul = mul nuw nsw i32 %i.023, 3
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end: ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride4_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+; CHECK-LABEL: @test_stride4_4i32(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw nsw <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw <4 x i32> [[TMP4]], <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], <4 x i32> [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[BROADCAST_SPLAT:%.*]], i32 0
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[TMP0]], i32 [[TMP7]])
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP6]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef)
+; CHECK-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[WIDE_MASKED_GATHER]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP0]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP10:!llvm.loop !.*]]
+;
+entry:
+  br label %for.body
+for.body: ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %mul = mul nuw nsw i32 %i.023, 4
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end: ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride_loopinvar_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n, i32 %stride) {
+; CHECK-LABEL: @test_stride_loopinvar_4i32(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> undef, i32 [[INDEX]], i32 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1]], <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[BROADCAST_SPLAT2]], <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = mul nuw nsw i32 [[TMP1]], [[STRIDE:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i32 [[TMP2]], 2
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i32 [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[BROADCAST_SPLAT]], i32 0
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 [[TMP1]], i32 [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to <4 x i32>*
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[TMP7]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]], <4 x i32> undef)
+; CHECK-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[WIDE_MASKED_LOAD]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP1]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
+; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], i32 4, <4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP12:!llvm.loop !.*]]
+;
+entry:
+  br label %for.body
+for.body: ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %mul = mul nuw nsw i32 %i.023, %stride
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end: ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride_noninvar_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+; CHECK-LABEL: @test_stride_noninvar_4i32(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND2:%.*]] = phi <4 x i32> [ <i32 3, i32 11, i32 19, i32 27>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT3:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 2
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 3
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw nsw <4 x i32> [[VEC_IND]], [[VEC_IND2]]
+; CHECK-NEXT: [[TMP6:%.*]] = add nuw nsw <4 x i32> [[TMP5]], <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], <4 x i32> [[TMP6]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP7]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+; CHECK-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[WIDE_MASKED_GATHER]]
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP1]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, i32* [[TMP9]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = bitcast i32* [[TMP10]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP8]], <4 x i32>* [[TMP11]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[VEC_IND_NEXT3]] = add <4 x i32> [[VEC_IND2]], <i32 32, i32 32, i32 32, i32 32>
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP14:!llvm.loop !.*]]
+;
+entry:
+  br label %for.body
+for.body: ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %stride = phi i32 [ %next.stride, %for.body ], [ 3, %entry ]
+  %mul = mul nuw nsw i32 %i.023, %stride
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %next.stride = add nuw nsw i32 %stride, 8
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end: ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride_noninvar2_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n) {
+; CHECK-LABEL: @test_stride_noninvar2_4i32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[STRIDE:%.*]] = phi i32 [ [[NEXT_STRIDE:%.*]], [[FOR_BODY]] ], [ 3, [[ENTRY]] ]
+; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], [[STRIDE]]
+; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], i32 [[ADD5]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
+; CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 5, [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[I_023]]
+; CHECK-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX9]], align 4
+; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_023]], 1
+; CHECK-NEXT: [[NEXT_STRIDE]] = mul nuw nsw i32 [[STRIDE]], 8
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N:%.*]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[END:%.*]], label [[FOR_BODY]]
+; CHECK: end:
+; CHECK-NEXT: ret void
+;
+entry:
+  br label %for.body
+for.body: ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %stride = phi i32 [ %next.stride, %for.body ], [ 3, %entry ]
+  %mul = mul nuw nsw i32 %i.023, %stride
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %next.stride = mul nuw nsw i32 %stride, 8
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end: ; preds = %end, %entry
+  ret void
+}
+
+define void @test_stride_noninvar3_4i32(i32* readonly %data, i32* noalias nocapture %dst, i32 %n, i32 %x) {
+; CHECK-LABEL: @test_stride_noninvar3_4i32(
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY:%.*]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND4:%.*]] = phi <4 x i32> [ [[INDUCTION:%.*]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[INDEX]], 2
+; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[INDEX]], 3
+; CHECK-NEXT: [[TMP7:%.*]] = mul nuw nsw <4 x i32> [[VEC_IND]], [[VEC_IND4]]
+; CHECK-NEXT: [[TMP8:%.*]] = add nuw nsw <4 x i32> [[TMP7]], <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, i32* [[DATA:%.*]], <4 x i32> [[TMP8]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> [[TMP9]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
+; CHECK-NEXT: [[TMP10:%.*]] = add nsw <4 x i32> <i32 5, i32 5, i32 5, i32 5>, [[WIDE_MASKED_GATHER]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i32 [[TMP3]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i32 0
+; CHECK-NEXT: [[TMP13:%.*]] = bitcast i32* [[TMP12]] to <4 x i32>*
+; CHECK-NEXT: store <4 x i32> [[TMP10]], <4 x i32>* [[TMP13]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[VEC_IND_NEXT5]] = add <4 x i32> [[VEC_IND4]], [[DOTSPLAT3:%.*]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC:%.*]]
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP16:!llvm.loop !.*]]
+;
+entry:
+  br label %for.body
+for.body: ; preds = %for.body.preheader, %for.body
+  %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+  %stride = phi i32 [ %next.stride, %for.body ], [ 3, %entry ]
+  %mul = mul nuw nsw i32 %i.023, %stride
+  %add5 = add nuw nsw i32 %mul, 2
+  %arrayidx6 = getelementptr inbounds i32, i32* %data, i32 %add5
+  %0 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 5, %0
+  %arrayidx9 = getelementptr inbounds i32, i32* %dst, i32 %i.023
+  store i32 %add7, i32* %arrayidx9, align 4
+  %inc = add nuw nsw i32 %i.023, 1
+  %next.stride = add nuw nsw i32 %stride, %x
+  %exitcond.not = icmp eq i32 %inc, %n
+  br i1 %exitcond.not, label %end, label %for.body
+end: ; preds = %end, %entry
+  ret void
+}
+
+declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
+declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
+declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
+declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.scatter.v4i32.v4p0i32(<4 x i32>, <4 x i32*>, i32, <4 x i1>)