Index: llvm/test/Transforms/LoopVectorize/runtime-checks-hoist.ll =================================================================== --- /dev/null +++ llvm/test/Transforms/LoopVectorize/runtime-checks-hoist.ll @@ -0,0 +1,604 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 +; REQUIRES: asserts +; RUN: opt < %s -p 'loop-vectorize' -force-vector-interleave=1 -S \ +; RUN: -force-vector-width=4 -debug-only=loop-accesses,loop-vectorize,loop-utils 2> %t | FileCheck %s +; RUN: cat %t | FileCheck %s --check-prefix=DEBUG + + +; Equivalent example in C: +; void diff_checks(int32_t *dst, int32_t *src, int m, int n) { +; for (int i = 0; i < m; i++) { +; for (int j = 0; j < n; j++) { +; dst[(i * (n + 1)) + j] = src[(i * n) + j]; +; } +; } +; } +; NOTE: The strides of the starting address values in the inner loop differ, i.e. +; '(i * (n + 1))' vs '(i * n)'. + +; DEBUG-LABEL: LAA: Found a loop in diff_checks: +; DEBUG-NOT: LAA: Adding RT check for range: + +define void @diff_checks(ptr nocapture noundef writeonly %dst, ptr nocapture noundef readonly %src, i32 noundef %m, i32 noundef %n) #0 { +; CHECK-LABEL: define void @diff_checks +; CHECK-SAME: (ptr nocapture noundef writeonly [[DST:%.*]], ptr nocapture noundef readonly [[SRC:%.*]], i32 noundef [[M:%.*]], i32 noundef [[N:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64 +; CHECK-NEXT: [[DST1:%.*]] = ptrtoint ptr [[DST]] to i64 +; CHECK-NEXT: [[ADD5:%.*]] = add nuw i32 [[N]], 1 +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[ADD5]] to i64 +; CHECK-NEXT: [[WIDE_M:%.*]] = zext i32 [[M]] to i64 +; CHECK-NEXT: [[WIDE_N:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[WIDE_N]], 2 +; CHECK-NEXT: br label [[OUTER_LOOP:%.*]] +; CHECK: outer.loop: +; CHECK-NEXT: [[IV_OUTER:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_OUTER_NEXT:%.*]], [[INNER_EXIT:%.*]] ] +; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP2]], [[IV_OUTER]] +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[DST1]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP3]], [[IV_OUTER]] +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[SRC2]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = mul nsw i64 [[IV_OUTER]], [[TMP0]] +; CHECK-NEXT: [[TMP9:%.*]] = mul nsw i64 [[IV_OUTER]], [[TMP1]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; CHECK: vector.memcheck: +; CHECK-NEXT: [[TMP10:%.*]] = sub i64 [[TMP5]], [[TMP7]] +; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP10]], 16 +; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_N]], [[N_MOD_VF]] +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP12:%.*]] = add nuw nsw i64 [[TMP11]], [[TMP8]] +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP14]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = add nsw i64 [[TMP11]], [[TMP9]] +; CHECK-NEXT: [[TMP16:%.*]] = 
getelementptr inbounds i32, ptr [[DST]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 0 +; CHECK-NEXT: store <4 x i32> [[WIDE_LOAD]], ptr [[TMP17]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[INNER_EXIT]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[OUTER_LOOP]] ], [ 0, [[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: br label [[INNER_LOOP:%.*]] +; CHECK: inner.loop: +; CHECK-NEXT: [[IV_INNER:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_INNER_NEXT:%.*]], [[INNER_LOOP]] ] +; CHECK-NEXT: [[TMP19:%.*]] = add nuw nsw i64 [[IV_INNER]], [[TMP8]] +; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP19]] +; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4 +; CHECK-NEXT: [[TMP21:%.*]] = add nsw i64 [[IV_INNER]], [[TMP9]] +; CHECK-NEXT: [[ARRAYIDX9_US:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP21]] +; CHECK-NEXT: store i32 [[TMP20]], ptr [[ARRAYIDX9_US]], align 4 +; CHECK-NEXT: [[IV_INNER_NEXT]] = add nuw nsw i64 [[IV_INNER]], 1 +; CHECK-NEXT: [[INNER_EXIT_COND:%.*]] = icmp eq i64 [[IV_INNER_NEXT]], [[WIDE_N]] +; CHECK-NEXT: br i1 [[INNER_EXIT_COND]], label [[INNER_EXIT]], label [[INNER_LOOP]], !llvm.loop [[LOOP3:![0-9]+]] +; CHECK: inner.exit: +; CHECK-NEXT: [[IV_OUTER_NEXT]] = add nuw nsw i64 [[IV_OUTER]], 1 +; CHECK-NEXT: [[OUTER_EXIT_COND:%.*]] = icmp eq i64 [[IV_OUTER_NEXT]], [[WIDE_M]] +; CHECK-NEXT: br i1 [[OUTER_EXIT_COND]], label [[OUTER_EXIT:%.*]], label [[OUTER_LOOP]] +; CHECK: outer.exit: +; CHECK-NEXT: ret void +; +entry: + %add5 = add nuw i32 %n, 1 + %0 = zext i32 %n to i64 + %1 = sext i32 %add5 to i64 + %wide.m = zext i32 %m to i64 + %wide.n = zext i32 %n to i64 + br label %outer.loop + +outer.loop: + %iv.outer = phi i64 [ 0, %entry ], [ %iv.outer.next, %inner.exit ] + %2 = mul nsw i64 %iv.outer, %0 + %3 = mul nsw i64 %iv.outer, %1 + br label %inner.loop + +inner.loop: + %iv.inner = phi i64 [ 0, %outer.loop ], [ %iv.inner.next, %inner.loop ] + %4 = add nuw nsw i64 %iv.inner, %2 + %arrayidx.us = getelementptr inbounds i32, ptr %src, i64 %4 + %5 = load i32, ptr %arrayidx.us, align 4 + %6 = add nsw i64 %iv.inner, %3 + %arrayidx9.us = getelementptr inbounds i32, ptr %dst, i64 %6 + store i32 %5, ptr %arrayidx9.us, align 4 + %iv.inner.next = add nuw nsw i64 %iv.inner, 1 + %inner.exit.cond = icmp eq i64 %iv.inner.next, %wide.n + br i1 %inner.exit.cond, label %inner.exit, label %inner.loop + +inner.exit: + %iv.outer.next = add nuw nsw i64 %iv.outer, 1 + %outer.exit.cond = icmp eq i64 %iv.outer.next, %wide.m + br i1 %outer.exit.cond, label %outer.exit, label %outer.loop + +outer.exit: + ret void +} + + +; Equivalent example in C: +; void full_checks(int32_t *dst, int32_t *src, int m, int n) { +; for (int i = 0; i < m; i++) { +; for (int j = 0; j < n; j++) { +; dst[(i * n) + j] += src[(i * n) + j]; +; } +; } +; } +; We decide to do full runtime checks here (as opposed to diff checks) due to +; the additional load of 'dst[(i * n) + j]' in the loop. 
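+
+; As a rough sketch only (the names below are illustrative, not taken from
+; the IR), the hoisted overlap check in vector.memcheck corresponds roughly
+; to the following C, evaluated once per outer loop iteration:
+;
+;   int32_t *dst_lo = dst + i * n, *dst_hi = dst_lo + n;
+;   int32_t *src_lo = src + i * n, *src_hi = src_lo + n;
+;   if (dst_lo < src_hi && src_lo < dst_hi)
+;     /* ranges may overlap: fall back to the scalar inner loop */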
+ +; DEBUG-LABEL: LAA: Found a loop in full_checks: +; DEBUG: LAA: Adding RT check for range: +; DEBUG-NEXT: Start: {%dst,+,(4 * (zext i32 %n to i64))}<%outer.loop> End: {((4 * (zext i32 %n to i64)) + %dst),+,(4 * (zext i32 %n to i64))}<%outer.loop> +; DEBUG-NEXT: LAA: Adding RT check for range: +; DEBUG-NEXT: Start: {%src,+,(4 * (zext i32 %n to i64))}<%outer.loop> End: {((4 * (zext i32 %n to i64)) + %src),+,(4 * (zext i32 %n to i64))}<%outer.loop> + +define void @full_checks(ptr nocapture noundef %dst, ptr nocapture noundef readonly %src, i32 noundef %m, i32 noundef %n) #0 { +; CHECK-LABEL: define void @full_checks +; CHECK-SAME: (ptr nocapture noundef [[DST:%.*]], ptr nocapture noundef readonly [[SRC:%.*]], i32 noundef [[M:%.*]], i32 noundef [[N:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[WIDE_M:%.*]] = zext i32 [[M]] to i64 +; CHECK-NEXT: [[WIDE_N:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[WIDE_N]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[WIDE_N]], 2 +; CHECK-NEXT: br label [[OUTER_LOOP:%.*]] +; CHECK: outer.loop: +; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[OUTER_IV_NEXT:%.*]], [[INNER_EXIT:%.*]] ] +; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP1]], [[OUTER_IV]] +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP4]] +; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]] +; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP5:%.*]] = mul nsw i64 [[OUTER_IV]], [[TMP0]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; CHECK: vector.memcheck: +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP3]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP2]], [[SCEVGEP1]] +; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_N]], [[N_MOD_VF]] +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP7:%.*]] = add nuw nsw i64 [[TMP6]], [[TMP5]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 4, !alias.scope !4 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4, !alias.scope !7, !noalias !4 +; CHECK-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD4]], [[WIDE_LOAD]] +; CHECK-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP11]], align 4, !alias.scope !7, !noalias !4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP9:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[INNER_EXIT]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[OUTER_LOOP]] ], [ 0, [[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: br label [[INNER_LOOP:%.*]] +; CHECK: inner.loop: +; CHECK-NEXT: [[IV_INNER:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_INNER_NEXT:%.*]], [[INNER_LOOP]] ] +; CHECK-NEXT: [[TMP14:%.*]] = add nuw nsw i64 [[IV_INNER]], [[TMP5]] +; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4 +; CHECK-NEXT: [[ARRAYIDX8_US:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX8_US]], align 4 +; CHECK-NEXT: [[ADD9_US:%.*]] = add nsw i32 [[TMP16]], [[TMP15]] +; CHECK-NEXT: store i32 [[ADD9_US]], ptr [[ARRAYIDX8_US]], align 4 +; CHECK-NEXT: [[IV_INNER_NEXT]] = add nuw nsw i64 [[IV_INNER]], 1 +; CHECK-NEXT: [[INNER_EXIT_COND:%.*]] = icmp eq i64 [[IV_INNER_NEXT]], [[WIDE_N]] +; CHECK-NEXT: br i1 [[INNER_EXIT_COND]], label [[INNER_EXIT]], label [[INNER_LOOP]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK: inner.exit: +; CHECK-NEXT: [[OUTER_IV_NEXT]] = add nuw nsw i64 [[OUTER_IV]], 1 +; CHECK-NEXT: [[OUTER_EXIT_COND:%.*]] = icmp eq i64 [[OUTER_IV_NEXT]], [[WIDE_M]] +; CHECK-NEXT: br i1 [[OUTER_EXIT_COND]], label [[OUTER_EXIT:%.*]], label [[OUTER_LOOP]] +; CHECK: outer.exit: +; CHECK-NEXT: ret void +; +entry: + %0 = zext i32 %n to i64 + %wide.m = zext i32 %m to i64 + %wide.n = zext i32 %n to i64 + br label %outer.loop + +outer.loop: + %outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %inner.exit ] + %1 = mul nsw i64 %outer.iv, %0 + br label %inner.loop + +inner.loop: + %iv.inner = phi i64 [ 0, %outer.loop ], [ %iv.inner.next, %inner.loop ] + %2 = add nuw nsw i64 %iv.inner, %1 + %arrayidx.us = getelementptr inbounds i32, ptr %src, i64 %2 + %3 = load i32, ptr %arrayidx.us, align 4 + %arrayidx8.us = getelementptr inbounds i32, ptr %dst, i64 %2 + %4 = load i32, ptr %arrayidx8.us, align 4 + %add9.us = add nsw i32 %4, %3 + store i32 %add9.us, ptr %arrayidx8.us, align 4 + %iv.inner.next = add nuw nsw i64 %iv.inner, 1 + %inner.exit.cond = icmp eq i64 %iv.inner.next, %wide.n + br i1 %inner.exit.cond, label %inner.exit, label %inner.loop + +inner.exit: + %outer.iv.next = add nuw nsw i64 %outer.iv, 1 + %outer.exit.cond = icmp eq i64 %outer.iv.next, %wide.m + br i1 %outer.exit.cond, label %outer.exit, label %outer.loop + +outer.exit: + ret void +} + + +; Equivalent example in C: +; void full_checks_diff_strides(int32_t *dst, int32_t *src, int m, int n) { +; for (int i = 0; i < m; i++) { +; for (int j = 0; j < n; j++) { +; dst[(i * (n + 1)) + j] += src[(i * n) + j]; +; } +; } +; } +; We decide to do full runtime checks here (as opposed to diff checks) due to +; the additional load of 'dst[(i * (n + 1)) + j]' in the loop. +; NOTE: This is different from the test above (@full_checks) because the dst array +; is accessed with a higher stride than src, and therefore the inner loop +; runtime checks will vary for each outer loop iteration.
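+
+; As a rough sketch (again with illustrative names), the hoisted bounds are
+; still affine in the outer induction variable 'i', but dst advances by
+; (n + 1) elements per outer iteration while src advances by n, so the
+; comparison is re-evaluated on every outer iteration:
+;
+;   int32_t *dst_lo = dst + i * (n + 1), *dst_hi = dst_lo + n;
+;   int32_t *src_lo = src + i * n, *src_hi = src_lo + n;
+;   if (dst_lo < src_hi && src_lo < dst_hi)
+;     /* ranges may overlap: fall back to the scalar inner loop */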
+ +; DEBUG-LABEL: LAA: Found a loop in full_checks_diff_strides: +; DEBUG: LAA: Adding RT check for range: +; DEBUG-NEXT: Start: {%dst,+,(4 + (4 * (zext i32 %n to i64)))}<%outer.loop> End: {((4 * (zext i32 %n to i64)) + %dst),+,(4 + (4 * (zext i32 %n to i64)))}<%outer.loop> +; DEBUG-NEXT: LAA: Adding RT check for range: +; DEBUG-NEXT: Start: {%src,+,(4 * (zext i32 %n to i64))}<%outer.loop> End: {((4 * (zext i32 %n to i64)) + %src),+,(4 * (zext i32 %n to i64))}<%outer.loop> + + +define void @full_checks_diff_strides(ptr nocapture noundef %dst, ptr nocapture noundef readonly %src, i32 noundef %m, i32 noundef %n) #0 { +; CHECK-LABEL: define void @full_checks_diff_strides +; CHECK-SAME: (ptr nocapture noundef [[DST:%.*]], ptr nocapture noundef readonly [[SRC:%.*]], i32 noundef [[M:%.*]], i32 noundef [[N:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[WIDE_M:%.*]] = zext i32 [[M]] to i64 +; CHECK-NEXT: [[WIDE_N:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = shl nuw nsw i64 [[WIDE_N]], 2 +; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 4 +; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[WIDE_N]], 2 +; CHECK-NEXT: br label [[OUTER_LOOP:%.*]] +; CHECK: outer.loop: +; CHECK-NEXT: [[OUTER_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[OUTER_IV_NEXT:%.*]], [[INNER_EXIT:%.*]] ] +; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP1]], [[OUTER_IV]] +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP0]], [[TMP3]] +; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP2]], [[OUTER_IV]] +; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP0]], [[TMP5]] +; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP6]] +; CHECK-NEXT: [[NPLUS1:%.*]] = add nuw nsw i32 [[N]], 1 +; CHECK-NEXT: [[WIDE_NPLUS1:%.*]] = zext i32 [[NPLUS1]] to i64 +; CHECK-NEXT: [[TMP7:%.*]] = mul nsw i64 [[OUTER_IV]], [[WIDE_N]] +; CHECK-NEXT: [[TMP8:%.*]] = mul nsw i64 [[OUTER_IV]], [[WIDE_NPLUS1]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; CHECK: vector.memcheck: +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP3]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP2]], [[SCEVGEP1]] +; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_N]], [[N_MOD_VF]] +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[TMP9]], [[TMP7]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP12]], align 4, !alias.scope !11 +; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i64 [[TMP9]], [[TMP8]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[TMP14]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 
x i32>, ptr [[TMP15]], align 4, !alias.scope !14, !noalias !11 +; CHECK-NEXT: [[TMP16:%.*]] = add nsw <4 x i32> [[WIDE_LOAD4]], [[WIDE_LOAD]] +; CHECK-NEXT: store <4 x i32> [[TMP16]], ptr [[TMP15]], align 4, !alias.scope !14, !noalias !11 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[INNER_EXIT]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[OUTER_LOOP]] ], [ 0, [[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: br label [[INNER_LOOP:%.*]] +; CHECK: inner.loop: +; CHECK-NEXT: [[IV_INNER:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_INNER_NEXT:%.*]], [[INNER_LOOP]] ] +; CHECK-NEXT: [[TMP18:%.*]] = add nuw nsw i64 [[IV_INNER]], [[TMP7]] +; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP18]] +; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4 +; CHECK-NEXT: [[TMP20:%.*]] = add nuw nsw i64 [[IV_INNER]], [[TMP8]] +; CHECK-NEXT: [[ARRAYIDX8_US:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP20]] +; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX8_US]], align 4 +; CHECK-NEXT: [[ADD9_US:%.*]] = add nsw i32 [[TMP21]], [[TMP19]] +; CHECK-NEXT: store i32 [[ADD9_US]], ptr [[ARRAYIDX8_US]], align 4 +; CHECK-NEXT: [[IV_INNER_NEXT]] = add nuw nsw i64 [[IV_INNER]], 1 +; CHECK-NEXT: [[INNER_EXIT_COND:%.*]] = icmp eq i64 [[IV_INNER_NEXT]], [[WIDE_N]] +; CHECK-NEXT: br i1 [[INNER_EXIT_COND]], label [[INNER_EXIT]], label [[INNER_LOOP]], !llvm.loop [[LOOP17:![0-9]+]] +; CHECK: inner.exit: +; CHECK-NEXT: [[OUTER_IV_NEXT]] = add nuw nsw i64 [[OUTER_IV]], 1 +; CHECK-NEXT: [[OUTER_EXIT_COND:%.*]] = icmp eq i64 [[OUTER_IV_NEXT]], [[WIDE_M]] +; CHECK-NEXT: br i1 [[OUTER_EXIT_COND]], label [[OUTER_EXIT:%.*]], label [[OUTER_LOOP]] +; CHECK: outer.exit: +; CHECK-NEXT: ret void +; +entry: + %wide.m = zext i32 %m to i64 + %wide.n = zext i32 %n to i64 + br label %outer.loop + +outer.loop: + %outer.iv = phi i64 [ 0, %entry ], [ %outer.iv.next, %inner.exit ] + %nplus1 = add nuw nsw i32 %n, 1 + %wide.nplus1 = zext i32 %nplus1 to i64 + %0 = mul nsw i64 %outer.iv, %wide.n + %1 = mul nsw i64 %outer.iv, %wide.nplus1 + br label %inner.loop + +inner.loop: + %iv.inner = phi i64 [ 0, %outer.loop ], [ %iv.inner.next, %inner.loop ] + %2 = add nuw nsw i64 %iv.inner, %0 + %arrayidx.us = getelementptr inbounds i32, ptr %src, i64 %2 + %3 = load i32, ptr %arrayidx.us, align 4 + %4 = add nuw nsw i64 %iv.inner, %1 + %arrayidx8.us = getelementptr inbounds i32, ptr %dst, i64 %4 + %5 = load i32, ptr %arrayidx8.us, align 4 + %add9.us = add nsw i32 %5, %3 + store i32 %add9.us, ptr %arrayidx8.us, align 4 + %iv.inner.next = add nuw nsw i64 %iv.inner, 1 + %inner.exit.cond = icmp eq i64 %iv.inner.next, %wide.n + br i1 %inner.exit.cond, label %inner.exit, label %inner.loop + +inner.exit: + %outer.iv.next = add nuw nsw i64 %outer.iv, 1 + %outer.exit.cond = icmp eq i64 %outer.iv.next, %wide.m + br i1 %outer.exit.cond, label %outer.exit, label %outer.loop + +outer.exit: + ret void +} + + +; Equivalent example in C: +; void diff_checks_src_start_invariant(int32_t *dst, int32_t *src, int m, int n) { +; for (int i = 0; i < m; i++) { +; for (int j = 0; j < n; j++) { +; dst[(i * n) + j] = 
src[j]; +; } +; } +; } + +; DEBUG-LABEL: LAA: Found a loop in diff_checks_src_start_invariant: + +define void @diff_checks_src_start_invariant(ptr nocapture noundef writeonly %dst, ptr nocapture noundef readonly %src, i32 noundef %m, i32 noundef %n) { +; CHECK-LABEL: define void @diff_checks_src_start_invariant +; CHECK-SAME: (ptr nocapture noundef writeonly [[DST:%.*]], ptr nocapture noundef readonly [[SRC:%.*]], i32 noundef [[M:%.*]], i32 noundef [[N:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64 +; CHECK-NEXT: [[DST1:%.*]] = ptrtoint ptr [[DST]] to i64 +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[WIDE_M:%.*]] = zext i32 [[M]] to i64 +; CHECK-NEXT: [[WIDE_N:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[WIDE_N]], 2 +; CHECK-NEXT: br label [[OUTER_LOOP:%.*]] +; CHECK: outer.loop: +; CHECK-NEXT: [[IV_OUTER:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_OUTER_NEXT:%.*]], [[INNER_LOOP_EXIT:%.*]] ] +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], [[IV_OUTER]] +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[DST1]], [[TMP2]] +; CHECK-NEXT: [[TMP4:%.*]] = mul nsw i64 [[IV_OUTER]], [[TMP0]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; CHECK: vector.memcheck: +; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[SRC2]] +; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP5]], 16 +; CHECK-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_N]], [[N_MOD_VF]] +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4 +; CHECK-NEXT: [[TMP9:%.*]] = add nuw nsw i64 [[TMP6]], [[TMP4]] +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i32 0 +; CHECK-NEXT: store <4 x i32> [[WIDE_LOAD]], ptr [[TMP11]], align 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[INNER_LOOP_EXIT]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[OUTER_LOOP]] ], [ 0, [[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: br label [[INNER_LOOP:%.*]] +; CHECK: inner.loop: +; CHECK-NEXT: [[IV_INNER:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_INNER_NEXT:%.*]], [[INNER_LOOP]] ] +; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV_INNER]] +; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4 +; CHECK-NEXT: [[TMP14:%.*]] = add nuw nsw i64 [[IV_INNER]], [[TMP4]] +; CHECK-NEXT: [[ARRAYIDX6_US:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP14]] +; CHECK-NEXT: 
store i32 [[TMP13]], ptr [[ARRAYIDX6_US]], align 4 +; CHECK-NEXT: [[IV_INNER_NEXT]] = add nuw nsw i64 [[IV_INNER]], 1 +; CHECK-NEXT: [[INNER_EXIT_COND:%.*]] = icmp eq i64 [[IV_INNER_NEXT]], [[WIDE_N]] +; CHECK-NEXT: br i1 [[INNER_EXIT_COND]], label [[INNER_LOOP_EXIT]], label [[INNER_LOOP]], !llvm.loop [[LOOP19:![0-9]+]] +; CHECK: inner.loop.exit: +; CHECK-NEXT: [[IV_OUTER_NEXT]] = add nuw nsw i64 [[IV_OUTER]], 1 +; CHECK-NEXT: [[OUTER_EXIT_COND:%.*]] = icmp eq i64 [[IV_OUTER_NEXT]], [[WIDE_M]] +; CHECK-NEXT: br i1 [[OUTER_EXIT_COND]], label [[OUTER_LOOP_EXIT:%.*]], label [[OUTER_LOOP]] +; CHECK: outer.loop.exit: +; CHECK-NEXT: ret void +; +entry: + %0 = zext i32 %n to i64 + %wide.m = zext i32 %m to i64 + %wide.n = zext i32 %n to i64 + br label %outer.loop + +outer.loop: + %iv.outer = phi i64 [ 0, %entry ], [ %iv.outer.next, %inner.loop.exit ] + %1 = mul nsw i64 %iv.outer, %0 + br label %inner.loop + +inner.loop: + %iv.inner = phi i64 [ 0, %outer.loop ], [ %iv.inner.next, %inner.loop ] + %arrayidx.us = getelementptr inbounds i32, ptr %src, i64 %iv.inner + %2 = load i32, ptr %arrayidx.us, align 4 + %3 = add nuw nsw i64 %iv.inner, %1 + %arrayidx6.us = getelementptr inbounds i32, ptr %dst, i64 %3 + store i32 %2, ptr %arrayidx6.us, align 4 + %iv.inner.next = add nuw nsw i64 %iv.inner, 1 + %inner.exit.cond = icmp eq i64 %iv.inner.next, %wide.n + br i1 %inner.exit.cond, label %inner.loop.exit, label %inner.loop + +inner.loop.exit: + %iv.outer.next = add nuw nsw i64 %iv.outer, 1 + %outer.exit.cond = icmp eq i64 %iv.outer.next, %wide.m + br i1 %outer.exit.cond, label %outer.loop.exit, label %outer.loop + +outer.loop.exit: + ret void +} + + +; Equivalent example in C: +; void full_checks_src_start_invariant(int32_t *dst, int32_t *src, int m, int n) { +; for (int i = 0; i < m; i++) { +; for (int j = 0; j < n; j++) { +; dst[(i * n) + j] += src[j]; +; } +; } +; } + +; DEBUG-LABEL: LAA: Found a loop in full_checks_src_start_invariant: +; DEBUG: LAA: Adding RT check for range: +; DEBUG-NEXT: Start: {%dst,+,(4 * (zext i32 %n to i64))}<%outer.loop> End: {((4 * (zext i32 %n to i64)) + %dst),+,(4 * (zext i32 %n to i64))}<%outer.loop> +; DEBUG-NEXT: LAA: Adding RT check for range: +; DEBUG-NEXT: Start: %src End: ((4 * (zext i32 %n to i64)) + %src) + +define void @full_checks_src_start_invariant(ptr nocapture noundef %dst, ptr nocapture noundef readonly %src, i32 noundef %m, i32 noundef %n) { +; CHECK-LABEL: define void @full_checks_src_start_invariant +; CHECK-SAME: (ptr nocapture noundef [[DST:%.*]], ptr nocapture noundef readonly [[SRC:%.*]], i32 noundef [[M:%.*]], i32 noundef [[N:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[WIDE_M:%.*]] = zext i32 [[M]] to i64 +; CHECK-NEXT: [[WIDE_N:%.*]] = zext i32 [[N]] to i64 +; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[WIDE_N]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[WIDE_N]], 2 +; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP2]] +; CHECK-NEXT: br label [[OUTER_LOOP:%.*]] +; CHECK: outer.loop: +; CHECK-NEXT: [[IV_OUTER:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_OUTER_NEXT:%.*]], [[INNER_LOOP_EXIT:%.*]] ] +; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP1]], [[IV_OUTER]] +; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP2]], [[TMP3]] +; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP5:%.*]] = mul nsw i64 [[IV_OUTER]], [[TMP0]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp 
ult i64 [[WIDE_N]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] +; CHECK: vector.memcheck: +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP1]] +; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] +; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] +; CHECK: vector.ph: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_N]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_N]], [[N_MOD_VF]] +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] +; CHECK: vector.body: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP8]], align 4, !alias.scope !20 +; CHECK-NEXT: [[TMP9:%.*]] = add nuw nsw i64 [[TMP6]], [[TMP5]] +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4, !alias.scope !23, !noalias !20 +; CHECK-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD3]], [[WIDE_LOAD]] +; CHECK-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP11]], align 4, !alias.scope !23, !noalias !20 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_N]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[INNER_LOOP_EXIT]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[OUTER_LOOP]] ], [ 0, [[VECTOR_MEMCHECK]] ] +; CHECK-NEXT: br label [[INNER_LOOP:%.*]] +; CHECK: inner.loop: +; CHECK-NEXT: [[IV_INNER:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_INNER_NEXT:%.*]], [[INNER_LOOP]] ] +; CHECK-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i64 [[IV_INNER]] +; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX_US]], align 4 +; CHECK-NEXT: [[TMP15:%.*]] = add nuw nsw i64 [[IV_INNER]], [[TMP5]] +; CHECK-NEXT: [[ARRAYIDX6_US:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX6_US]], align 4 +; CHECK-NEXT: [[ADD7_US:%.*]] = add nsw i32 [[TMP16]], [[TMP14]] +; CHECK-NEXT: store i32 [[ADD7_US]], ptr [[ARRAYIDX6_US]], align 4 +; CHECK-NEXT: [[IV_INNER_NEXT]] = add nuw nsw i64 [[IV_INNER]], 1 +; CHECK-NEXT: [[INNER_EXIT_COND:%.*]] = icmp eq i64 [[IV_INNER_NEXT]], [[WIDE_N]] +; CHECK-NEXT: br i1 [[INNER_EXIT_COND]], label [[INNER_LOOP_EXIT]], label [[INNER_LOOP]], !llvm.loop [[LOOP26:![0-9]+]] +; CHECK: inner.loop.exit: +; CHECK-NEXT: [[IV_OUTER_NEXT]] = add nuw nsw i64 [[IV_OUTER]], 1 +; CHECK-NEXT: [[OUTER_EXIT_COND:%.*]] = icmp eq i64 [[IV_OUTER_NEXT]], [[WIDE_M]] +; CHECK-NEXT: br i1 [[OUTER_EXIT_COND]], label [[OUTER_LOOP_EXIT:%.*]], label [[OUTER_LOOP]] +; CHECK: outer.loop.exit: +; CHECK-NEXT: ret void +; +entry: + %0 = zext i32 %n to i64 + %wide.m = zext i32 %m to i64 + %wide.n = zext i32 %n to i64 + br label 
%outer.loop + +outer.loop: + %iv.outer = phi i64 [ 0, %entry ], [ %iv.outer.next, %inner.loop.exit ] + %1 = mul nsw i64 %iv.outer, %0 + br label %inner.loop + +inner.loop: + %iv.inner = phi i64 [ 0, %outer.loop ], [ %iv.inner.next, %inner.loop ] + %arrayidx.us = getelementptr inbounds i32, ptr %src, i64 %iv.inner + %2 = load i32, ptr %arrayidx.us, align 4 + %3 = add nuw nsw i64 %iv.inner, %1 + %arrayidx6.us = getelementptr inbounds i32, ptr %dst, i64 %3 + %4 = load i32, ptr %arrayidx6.us, align 4 + %add7.us = add nsw i32 %4, %2 + store i32 %add7.us, ptr %arrayidx6.us, align 4 + %iv.inner.next = add nuw nsw i64 %iv.inner, 1 + %inner.exit.cond = icmp eq i64 %iv.inner.next, %wide.n + br i1 %inner.exit.cond, label %inner.loop.exit, label %inner.loop + +inner.loop.exit: + %iv.outer.next = add nuw nsw i64 %iv.outer, 1 + %outer.exit.cond = icmp eq i64 %iv.outer.next, %wide.m + br i1 %outer.exit.cond, label %outer.loop.exit, label %outer.loop + +outer.loop.exit: + ret void +}
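+
+; As a rough sketch for @full_checks_src_start_invariant above (illustrative
+; names): since 'src[j]' does not depend on the outer induction variable, the
+; src range is computed once in the entry block and only the dst bounds are
+; recomputed per outer iteration:
+;
+;   int32_t *src_hi = src + n; /* loop-invariant */
+;   int32_t *dst_lo = dst + i * n, *dst_hi = dst_lo + n;
+;   if (dst_lo < src_hi && src < dst_hi)
+;     /* ranges may overlap: fall back to the scalar inner loop */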