diff --git a/llvm/test/Transforms/IndVarSimplify/simplify-tripcount.ll b/llvm/test/Transforms/IndVarSimplify/simplify-tripcount.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/IndVarSimplify/simplify-tripcount.ll
@@ -0,0 +1,302 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -O3 -S -force-vector-width=2 -force-vector-interleave=1 -scalable-vectorization=on < %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64-unknown-linux-gnu"
+
+
+; int initial = 0;
+; for(int i = 0; i < nOut; i++) {
+;   double temp_value = 0.0;
+;   for (int j = initial; j < initial + nIn; j++) {
+;     temp_value += values[j] * x[idx[j]];
+;   }
+;   initial += nIn;
+;   b[i] = temp_value;
+; }
+;
+; Function Attrs: nofree norecurse nosync nounwind uwtable vscale_range(1,16)
+define dso_local void @testTCAdd(i32 noundef %nOut, double* nocapture noundef readonly %x, double* nocapture noundef writeonly %b, double* nocapture noundef readonly %values, i32 noundef %nIn, i32* nocapture noundef readonly %idx) local_unnamed_addr #0 {
+; CHECK-LABEL: @testTCAdd(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP33:%.*]] = icmp sgt i32 [[NOUT:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP33]], label [[FOR_COND1_PREHEADER_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: for.cond1.preheader.preheader:
+; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[NIN:%.*]] to i64
+; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NOUT]] to i64
+; CHECK-NEXT: [[CMP229:%.*]] = icmp sgt i32 [[NIN]], 0
+; CHECK-NEXT: br i1 [[CMP229]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]], label [[FOR_COND1_PREHEADER_PREHEADER1:%.*]]
+; CHECK: for.cond1.preheader.us.preheader:
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
+; CHECK-NEXT: br label [[FOR_COND1_PREHEADER_US:%.*]]
+; CHECK: for.cond1.preheader.preheader1:
+; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[WIDE_TRIP_COUNT]], 3
+; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr align 8 [[B:%.*]], i8 0, i64 [[TMP3]], i1 false)
+; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
+; CHECK: for.cond1.preheader.us:
+; CHECK-NEXT: [[INDVARS_IV39_US:%.*]] = phi i64 [ [[INDVARS_IV_NEXT40_US:%.*]], [[FOR_COND_CLEANUP3_LOOPEXIT_US:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
+; CHECK-NEXT: [[INDVARS_IV_US:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_US:%.*]], [[FOR_COND_CLEANUP3_LOOPEXIT_US]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDVARS_IV_US]], [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDVARS_IV_US]], 1
+; CHECK-NEXT: [[SMAX:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP4]], i64 [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[INDVARS_IV39_US]], [[TMP0]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[SMAX]], [[TMP6]]
+; CHECK-NEXT: [[INDVARS_IV_NEXT_US]] = add nsw i64 [[INDVARS_IV_US]], [[TMP0]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP7]], [[TMP2]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[FOR_BODY4_US_PREHEADER:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP8:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP8]], 1
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP7]], [[TMP9]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP7]], [[N_MOD_VF]]
+; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[INDVARS_IV_US]], [[N_VEC]]
+; CHECK-NEXT: [[TMP10:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 1
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x double> [ insertelement (<vscale x 2 x double> shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double -0.000000e+00, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer), double 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[INDVARS_IV_US]], [[INDEX]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[VALUES:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[TMP12]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[IDX:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i32>, ptr [[TMP13]], align 4
+; CHECK-NEXT: [[TMP14:%.*]] = sext <vscale x 2 x i32> [[WIDE_LOAD3]] to <vscale x 2 x i64>
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds double, ptr [[X:%.*]], <vscale x 2 x i64> [[TMP14]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = tail call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[TMP15]], i32 8, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x double> poison)
+; CHECK-NEXT: [[TMP16:%.*]] = fmul contract <vscale x 2 x double> [[WIDE_LOAD]], [[WIDE_MASKED_GATHER]]
+; CHECK-NEXT: [[TMP17]] = fadd contract <vscale x 2 x double> [[VEC_PHI]], [[TMP16]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[TMP19:%.*]] = tail call contract double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> [[TMP17]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP3_LOOPEXIT_US]], label [[FOR_BODY4_US_PREHEADER]]
+; CHECK: for.body4.us.preheader:
+; CHECK-NEXT: [[INDVARS_IV36_US_PH:%.*]] = phi i64 [ [[INDVARS_IV_US]], [[FOR_COND1_PREHEADER_US]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[TEMP_VALUE_030_US_PH:%.*]] = phi double [ 0.000000e+00, [[FOR_COND1_PREHEADER_US]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: br label [[FOR_BODY4_US:%.*]]
+; CHECK: for.body4.us:
+; CHECK-NEXT: [[INDVARS_IV36_US:%.*]] = phi i64 [ [[INDVARS_IV_NEXT37_US:%.*]], [[FOR_BODY4_US]] ], [ [[INDVARS_IV36_US_PH]], [[FOR_BODY4_US_PREHEADER]] ]
+; CHECK-NEXT: [[TEMP_VALUE_030_US:%.*]] = phi double [ [[ADD9_US:%.*]], [[FOR_BODY4_US]] ], [ [[TEMP_VALUE_030_US_PH]], [[FOR_BODY4_US_PREHEADER]] ]
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds double, ptr [[VALUES]], i64 [[INDVARS_IV36_US]]
+; CHECK-NEXT: [[TMP20:%.*]] = load double, ptr [[ARRAYIDX_US]], align 8
+; CHECK-NEXT: [[ARRAYIDX6_US:%.*]] = getelementptr inbounds i32, ptr [[IDX]], i64 [[INDVARS_IV36_US]]
+; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX6_US]], align 4
+; CHECK-NEXT: [[IDXPROM7_US:%.*]] = sext i32 [[TMP21]] to i64
+; CHECK-NEXT: [[ARRAYIDX8_US:%.*]] = getelementptr inbounds double, ptr [[X]], i64 [[IDXPROM7_US]]
+; CHECK-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX8_US]], align 8
+; CHECK-NEXT: [[MUL_US:%.*]] = fmul contract double [[TMP20]], [[TMP22]]
+; CHECK-NEXT: [[ADD9_US]] = fadd contract double [[TEMP_VALUE_030_US]], [[MUL_US]]
+; CHECK-NEXT: [[INDVARS_IV_NEXT37_US]] = add nsw i64 [[INDVARS_IV36_US]], 1
+; CHECK-NEXT: [[CMP2_US:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT37_US]], [[INDVARS_IV_NEXT_US]]
+; CHECK-NEXT: br i1 [[CMP2_US]], label [[FOR_BODY4_US]], label [[FOR_COND_CLEANUP3_LOOPEXIT_US]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: for.cond.cleanup3.loopexit.us:
+; CHECK-NEXT: [[ADD9_US_LCSSA:%.*]] = phi double [ [[TMP19]], [[MIDDLE_BLOCK]] ], [ [[ADD9_US]], [[FOR_BODY4_US]] ]
+; CHECK-NEXT: [[ARRAYIDX12_US:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[INDVARS_IV39_US]]
+; CHECK-NEXT: store double [[ADD9_US_LCSSA]], ptr [[ARRAYIDX12_US]], align 8
+; CHECK-NEXT: [[INDVARS_IV_NEXT40_US]] = add nuw nsw i64 [[INDVARS_IV39_US]], 1
+; CHECK-NEXT: [[EXITCOND_NOT_US:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT40_US]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT_US]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER_US]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+;
+entry:
+  %cmp33 = icmp sgt i32 %nOut, 0
+  br i1 %cmp33, label %for.cond1.preheader.preheader, label %for.cond.cleanup
+
+for.cond1.preheader.preheader: ; preds = %entry
+  %0 = sext i32 %nIn to i64
+  %wide.trip.count = zext i32 %nOut to i64
+  %cmp229 = icmp sgt i32 %nIn, 0
+  br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.cond.cleanup3
+  %indvars.iv39 = phi i64 [ 0, %for.cond1.preheader.preheader ], [ %indvars.iv.next40, %for.cond.cleanup3 ]
+  %indvars.iv = phi i64 [ 0, %for.cond1.preheader.preheader ], [ %indvars.iv.next, %for.cond.cleanup3 ]
+  %indvars.iv.next = add nsw i64 %indvars.iv, %0
+  br i1 %cmp229, label %for.body4, label %for.cond.cleanup3
+
+for.cond.cleanup: ; preds = %for.cond.cleanup3, %entry
+  ret void
+
+for.cond.cleanup3: ; preds = %for.body4, %for.cond1.preheader
+  %temp_value.0.lcssa = phi double [ 0.000000e+00, %for.cond1.preheader ], [ %add9, %for.body4 ]
+  %arrayidx12 = getelementptr inbounds double, double* %b, i64 %indvars.iv39
+  store double %temp_value.0.lcssa, double* %arrayidx12, align 8
+  %indvars.iv.next40 = add nuw nsw i64 %indvars.iv39, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next40, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.cond1.preheader
+
+for.body4: ; preds = %for.cond1.preheader, %for.body4
+  %indvars.iv36 = phi i64 [ %indvars.iv.next37, %for.body4 ], [ %indvars.iv, %for.cond1.preheader ]
+  %temp_value.030 = phi double [ %add9, %for.body4 ], [ 0.000000e+00, %for.cond1.preheader ]
+  %arrayidx = getelementptr inbounds double, double* %values, i64 %indvars.iv36
+  %1 = load double, double* %arrayidx, align 8
+  %arrayidx6 = getelementptr inbounds i32, i32* %idx, i64 %indvars.iv36
+  %2 = load i32, i32* %arrayidx6, align 4
+  %idxprom7 = sext i32 %2 to i64
+  %arrayidx8 = getelementptr inbounds double, double* %x, i64 %idxprom7
+  %3 = load double, double* %arrayidx8, align 8
+  %mul = fmul contract double %1, %3
+  %add9 = fadd contract double %temp_value.030, %mul
+  %indvars.iv.next37 = add nsw i64 %indvars.iv36, 1
+  %cmp2 = icmp slt i64 %indvars.iv.next37, %indvars.iv.next
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup3
+}
+
+; int initial = 0;
+; for(int i = 0; i < nOut; i++) {
+;   double temp_value = 0.0;
+;   for (int j = initial; j > initial - nIn; j--) {
+;     temp_value += values[j] * x[idx[j]];
+;   }
+;   initial += nIn;
+;   b[i] = temp_value;
+; }
+; Function Attrs: nofree norecurse nosync nounwind uwtable vscale_range(1,16)
+define dso_local void @testTCSub(i32 noundef %nOut, double* nocapture noundef readonly %x, double* nocapture noundef writeonly %b, double* nocapture noundef readonly %values, i32 noundef %nIn, i32* nocapture noundef readonly %idx) local_unnamed_addr #0 {
+; CHECK-LABEL: @testTCSub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP30:%.*]] = icmp sgt i32 [[NOUT:%.*]], 0
+; CHECK-NEXT: br i1 [[CMP30]], label [[FOR_COND1_PREHEADER_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: for.cond1.preheader.preheader:
+; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[NIN:%.*]] to i64
+; CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NOUT]] to i64
+; CHECK-NEXT: [[CMP227:%.*]] = icmp sgt i32 [[NIN]], 0
+; CHECK-NEXT: br i1 [[CMP227]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]], label [[FOR_COND1_PREHEADER_PREHEADER1:%.*]]
+; CHECK: for.cond1.preheader.us.preheader:
+; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
+; CHECK-NEXT: br label [[FOR_COND1_PREHEADER_US:%.*]]
+; CHECK: for.cond1.preheader.preheader1:
+; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[WIDE_TRIP_COUNT]], 3
+; CHECK-NEXT: tail call void @llvm.memset.p0.i64(ptr align 8 [[B:%.*]], i8 0, i64 [[TMP3]], i1 false)
+; CHECK-NEXT: br label [[FOR_COND_CLEANUP]]
+; CHECK: for.cond1.preheader.us:
+; CHECK-NEXT: [[INDVARS_IV37_US:%.*]] = phi i64 [ [[INDVARS_IV_NEXT38_US:%.*]], [[FOR_COND_CLEANUP3_LOOPEXIT_US:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
+; CHECK-NEXT: [[INDVARS_IV_US:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_US:%.*]], [[FOR_COND_CLEANUP3_LOOPEXIT_US]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = sub i64 [[INDVARS_IV_US]], [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDVARS_IV_US]], -1
+; CHECK-NEXT: [[SMIN:%.*]] = tail call i64 @llvm.smin.i64(i64 [[TMP4]], i64 [[TMP5]])
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[INDVARS_IV_US]], [[SMIN]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub nsw i64 [[INDVARS_IV_US]], [[TMP0]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP2]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[FOR_BODY4_US_PREHEADER:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP8:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP8]], 1
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP6]], [[TMP9]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP6]], [[N_MOD_VF]]
+; CHECK-NEXT: [[IND_END:%.*]] = sub i64 [[INDVARS_IV_US]], [[N_VEC]]
+; CHECK-NEXT: [[TMP10:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 1
+; CHECK-NEXT: [[TMP12:%.*]] = sub nsw i64 1, [[TMP11]]
+; CHECK-NEXT: [[TMP13:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP14:%.*]] = shl nuw nsw i64 [[TMP13]], 1
+; CHECK-NEXT: [[TMP15:%.*]] = sub nsw i64 1, [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = tail call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP17:%.*]] = shl nuw nsw i64 [[TMP16]], 1
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x double> [ insertelement (<vscale x 2 x double> shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double -0.000000e+00, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer), double 0.000000e+00, i32 0), [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[INDVARS_IV_US]], [[INDEX]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds double, ptr [[VALUES:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[TMP18]], i64 [[TMP12]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[TMP19]], align 8
+; CHECK-NEXT: [[REVERSE:%.*]] = tail call <vscale x 2 x double> @llvm.experimental.vector.reverse.nxv2f64(<vscale x 2 x double> [[WIDE_LOAD]])
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[IDX:%.*]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP20]], i64 [[TMP15]]
+; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i32>, ptr [[TMP21]], align 4
+; CHECK-NEXT: [[REVERSE4:%.*]] = tail call <vscale x 2 x i32> @llvm.experimental.vector.reverse.nxv2i32(<vscale x 2 x i32> [[WIDE_LOAD3]])
+; CHECK-NEXT: [[TMP22:%.*]] = sext <vscale x 2 x i32> [[REVERSE4]] to <vscale x 2 x i64>
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds double, ptr [[X:%.*]], <vscale x 2 x i64> [[TMP22]]
+; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = tail call <vscale x 2 x double> @llvm.masked.gather.nxv2f64.nxv2p0(<vscale x 2 x ptr> [[TMP23]], i32 8, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x double> poison)
+; CHECK-NEXT: [[TMP24:%.*]] = fmul contract <vscale x 2 x double> [[REVERSE]], [[WIDE_MASKED_GATHER]]
+; CHECK-NEXT: [[TMP25]] = fadd contract <vscale x 2 x double> [[VEC_PHI]], [[TMP24]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP17]]
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[TMP27:%.*]] = tail call contract double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> [[TMP25]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP3_LOOPEXIT_US]], label [[FOR_BODY4_US_PREHEADER]]
+; CHECK: for.body4.us.preheader:
+; CHECK-NEXT: [[INDVARS_IV34_US_PH:%.*]] = phi i64 [ [[INDVARS_IV_US]], [[FOR_COND1_PREHEADER_US]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[TEMP_VALUE_028_US_PH:%.*]] = phi double [ 0.000000e+00, [[FOR_COND1_PREHEADER_US]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: br label [[FOR_BODY4_US:%.*]]
+; CHECK: for.body4.us:
+; CHECK-NEXT: [[INDVARS_IV34_US:%.*]] = phi i64 [ [[INDVARS_IV_NEXT35_US:%.*]], [[FOR_BODY4_US]] ], [ [[INDVARS_IV34_US_PH]], [[FOR_BODY4_US_PREHEADER]] ]
+; CHECK-NEXT: [[TEMP_VALUE_028_US:%.*]] = phi double [ [[ADD_US:%.*]], [[FOR_BODY4_US]] ], [ [[TEMP_VALUE_028_US_PH]], [[FOR_BODY4_US_PREHEADER]] ]
+; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds double, ptr [[VALUES]], i64 [[INDVARS_IV34_US]]
+; CHECK-NEXT: [[TMP28:%.*]] = load double, ptr [[ARRAYIDX_US]], align 8
+; CHECK-NEXT: [[ARRAYIDX6_US:%.*]] = getelementptr inbounds i32, ptr [[IDX]], i64 [[INDVARS_IV34_US]]
+; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX6_US]], align 4
+; CHECK-NEXT: [[IDXPROM7_US:%.*]] = sext i32 [[TMP29]] to i64
+; CHECK-NEXT: [[ARRAYIDX8_US:%.*]] = getelementptr inbounds double, ptr [[X]], i64 [[IDXPROM7_US]]
+; CHECK-NEXT: [[TMP30:%.*]] = load double, ptr [[ARRAYIDX8_US]], align 8
+; CHECK-NEXT: [[MUL_US:%.*]] = fmul contract double [[TMP28]], [[TMP30]]
+; CHECK-NEXT: [[ADD_US]] = fadd contract double [[TEMP_VALUE_028_US]], [[MUL_US]]
+; CHECK-NEXT: [[INDVARS_IV_NEXT35_US]] = add nsw i64 [[INDVARS_IV34_US]], -1
+; CHECK-NEXT: [[CMP2_US:%.*]] = icmp sgt i64 [[INDVARS_IV_NEXT35_US]], [[TMP7]]
+; CHECK-NEXT: br i1 [[CMP2_US]], label [[FOR_BODY4_US]], label [[FOR_COND_CLEANUP3_LOOPEXIT_US]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: for.cond.cleanup3.loopexit.us:
+; CHECK-NEXT: [[ADD_US_LCSSA:%.*]] = phi double [ [[TMP27]], [[MIDDLE_BLOCK]] ], [ [[ADD_US]], [[FOR_BODY4_US]] ]
+; CHECK-NEXT: [[INDVARS_IV_NEXT_US]] = add nsw i64 [[INDVARS_IV_US]], [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX11_US:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[INDVARS_IV37_US]]
+; CHECK-NEXT: store double [[ADD_US_LCSSA]], ptr [[ARRAYIDX11_US]], align 8
+; CHECK-NEXT: [[INDVARS_IV_NEXT38_US]] = add nuw nsw i64 [[INDVARS_IV37_US]], 1
+; CHECK-NEXT: [[EXITCOND_NOT_US:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT38_US]], [[WIDE_TRIP_COUNT]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT_US]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER_US]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: ret void
+;
+entry:
+  %cmp30 = icmp sgt i32 %nOut, 0
+  br i1 %cmp30, label %for.cond1.preheader.preheader, label %for.cond.cleanup
+
+for.cond1.preheader.preheader: ; preds = %entry
+  %0 = sext i32 %nIn to i64
+  %wide.trip.count = zext i32 %nOut to i64
+  %cmp227 = icmp sgt i32 %nIn, 0
+  br label %for.cond1.preheader
+
+for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.cond.cleanup3
+  %indvars.iv37 = phi i64 [ 0, %for.cond1.preheader.preheader ], [ %indvars.iv.next38, %for.cond.cleanup3 ]
+  %indvars.iv = phi i64 [ 0, %for.cond1.preheader.preheader ], [ %indvars.iv.next, %for.cond.cleanup3 ]
+  %1 = sub nsw i64 %indvars.iv, %0
+  br i1 %cmp227, label %for.body4, label %for.cond.cleanup3
+
+for.cond.cleanup: ; preds = %for.cond.cleanup3, %entry
+  ret void
+
+for.cond.cleanup3: ; preds = %for.body4, %for.cond1.preheader
+  %temp_value.0.lcssa = phi double [ 0.000000e+00, %for.cond1.preheader ], [ %add, %for.body4 ]
+  %indvars.iv.next = add nsw i64 %indvars.iv, %0
+  %arrayidx11 = getelementptr inbounds double, double* %b, i64 %indvars.iv37
+  store double %temp_value.0.lcssa, double* %arrayidx11, align 8
+  %indvars.iv.next38 = add nuw nsw i64 %indvars.iv37, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next38, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.cond1.preheader
+
+for.body4: ; preds = %for.cond1.preheader, %for.body4
+  %indvars.iv34 = phi i64 [ %indvars.iv.next35, %for.body4 ], [ %indvars.iv, %for.cond1.preheader ]
+  %temp_value.028 = phi double [ %add, %for.body4 ], [ 0.000000e+00, %for.cond1.preheader ]
+  %arrayidx = getelementptr inbounds double, double* %values, i64 %indvars.iv34
+  %2 = load double, double* %arrayidx, align 8
+  %arrayidx6 = getelementptr inbounds i32, i32* %idx, i64 %indvars.iv34
+  %3 = load i32, i32* %arrayidx6, align 4
+  %idxprom7 = sext i32 %3 to i64
+  %arrayidx8 = getelementptr inbounds double, double* %x, i64 %idxprom7
+  %4 = load double, double* %arrayidx8, align 8
+  %mul = fmul contract double %2, %4
+  %add = fadd contract double %temp_value.028, %mul
+  %indvars.iv.next35 = add nsw i64 %indvars.iv34, -1
+  %cmp2 = icmp sgt i64 %indvars.iv.next35, %1
+  br i1 %cmp2, label %for.body4, label %for.cond.cleanup3
+}
+
+attributes #0 = { nofree norecurse nosync nounwind uwtable vscale_range(1,16) "frame-pointer"="non-leaf" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon,+sve,+v8.2a" }