Differential D106033, Diff 362226: clang/test/OpenMP/target_teams_distribute_simd_firstprivate_codegen.cpp
clang/test/OpenMP/target_teams_distribute_simd_firstprivate_codegen.cpp
[... first 352 lines not shown ...]
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[SIVAR_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[SIVAR_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S]*, %struct.S*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i64 [[TMP4]], [2 x %struct.S]* [[TMP1]], %struct.S* [[TMP2]], i64 [[TMP6]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
-// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR5:[0-9]+]] {
+// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i64 [[SIVAR:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
// CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
[... 60 lines not shown ...]
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
-// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
-// CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
+// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !5
+// CHECK1-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
+// CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1: omp.inner.for.cond.cleanup:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
-// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
-// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
-// CHECK1-NEXT: store i32 [[TMP23]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP25]] to i64
-// CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 [[IDXPROM4]]
-// CHECK1-NEXT: [[TMP26:%.*]] = bitcast %struct.S* [[ARRAYIDX5]] to i8*
-// CHECK1-NEXT: [[TMP27:%.*]] = bitcast %struct.S* [[VAR]] to i8*
-// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i64 4, i1 false), !llvm.access.group !5
-// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP28]]
-// CHECK1-NEXT: store i32 [[ADD6]], i32* [[SIVAR]], align 4, !llvm.access.group !5
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !5
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
+// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
+// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC2]], i64 0, i64 [[IDXPROM]]
+// CHECK1-NEXT: store i32 [[TMP15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !5
+// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
+// CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP17]] to i64
+// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR3]], i64 0, i64 [[IDXPROM8]]
+// CHECK1-NEXT: [[TMP18:%.*]] = bitcast %struct.S* [[ARRAYIDX9]] to i8*
+// CHECK1-NEXT: [[TMP19:%.*]] = bitcast %struct.S* [[VAR5]] to i8*
+// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i64 4, i1 false), !llvm.access.group !5
+// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
+// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[CONV1]], align 8, !llvm.access.group !5
+// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP21]], [[TMP20]]
+// CHECK1-NEXT: store i32 [[ADD10]], i32* [[CONV1]], align 8, !llvm.access.group !5
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
-// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
-// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP30]], 1
-// CHECK1-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
+// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
+// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], 1
+// CHECK1-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
[... 227 lines not shown ...]
// CHECK1-NEXT: store i32 [[TMP3]], i32* [[CONV1]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i64 [[TMP4]], [2 x %struct.S.0]* [[TMP1]], %struct.S.0* [[TMP5]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3
-// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias [[__CONTEXT:%.*]]) #[[ATTR5]] {
+// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR5]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
// CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 8
[... 62 lines not shown ...]
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
-// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
-// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
-// CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
-// CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
+// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
+// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
+// CHECK1-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
+// CHECK1-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1: omp.inner.for.cond.cleanup:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
-// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
-// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
+// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
+// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
-// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group !11
-// CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
-// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
-// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
-// CHECK1-NEXT: store i32 [[TMP22]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !11
-// CHECK1-NEXT: [[TMP24:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP4]], align 8, !llvm.access.group !11
-// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
-// CHECK1-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
-// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 [[IDXPROM6]]
-// CHECK1-NEXT: [[TMP26:%.*]] = bitcast %struct.S.0* [[ARRAYIDX7]] to i8*
-// CHECK1-NEXT: [[TMP27:%.*]] = bitcast %struct.S.0* [[TMP24]] to i8*
-// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i64 4, i1 false), !llvm.access.group !11
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !11
+// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
+// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
+// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC2]], i64 0, i64 [[IDXPROM]]
+// CHECK1-NEXT: store i32 [[TMP16]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !11
+// CHECK1-NEXT: [[TMP18:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP7]], align 8, !llvm.access.group !11
+// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
+// CHECK1-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP19]] to i64
+// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i64 0, i64 [[IDXPROM9]]
+// CHECK1-NEXT: [[TMP20:%.*]] = bitcast %struct.S.0* [[ARRAYIDX10]] to i8*
+// CHECK1-NEXT: [[TMP21:%.*]] = bitcast %struct.S.0* [[TMP18]] to i8*
+// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP20]], i8* align 4 [[TMP21]], i64 4, i1 false), !llvm.access.group !11
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
-// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
-// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP28]], 1
-// CHECK1-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
+// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
+// CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], 1
+// CHECK1-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
[... 326 lines not shown ...]
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[SIVAR_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[CONV3]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[SIVAR_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S]*, %struct.S*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i64 [[TMP4]], [2 x %struct.S]* [[TMP1]], %struct.S* [[TMP2]], i64 [[TMP6]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
-// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR5:[0-9]+]] {
+// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i64 [[SIVAR:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
// CHECK2-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK2-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
[... 60 lines not shown ...]
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
-// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
-// CHECK2-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
+// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
+// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !5
+// CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
+// CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK2: omp.inner.for.cond.cleanup:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
-// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
+// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
+// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
-// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
-// CHECK2-NEXT: store i32 [[TMP23]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP25]] to i64
-// CHECK2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 [[IDXPROM4]]
-// CHECK2-NEXT: [[TMP26:%.*]] = bitcast %struct.S* [[ARRAYIDX5]] to i8*
-// CHECK2-NEXT: [[TMP27:%.*]] = bitcast %struct.S* [[VAR]] to i8*
-// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i64 4, i1 false), !llvm.access.group !5
-// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP28]]
-// CHECK2-NEXT: store i32 [[ADD6]], i32* [[SIVAR]], align 4, !llvm.access.group !5
+// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !5
+// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
+// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64
+// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC2]], i64 0, i64 [[IDXPROM]]
+// CHECK2-NEXT: store i32 [[TMP15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !5
+// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
+// CHECK2-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP17]] to i64
+// CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR3]], i64 0, i64 [[IDXPROM8]]
+// CHECK2-NEXT: [[TMP18:%.*]] = bitcast %struct.S* [[ARRAYIDX9]] to i8*
+// CHECK2-NEXT: [[TMP19:%.*]] = bitcast %struct.S* [[VAR5]] to i8*
+// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i64 4, i1 false), !llvm.access.group !5
+// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
+// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[CONV1]], align 8, !llvm.access.group !5
+// CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP21]], [[TMP20]]
+// CHECK2-NEXT: store i32 [[ADD10]], i32* [[CONV1]], align 8, !llvm.access.group !5
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
-// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
-// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP30]], 1
-// CHECK2-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
+// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
+// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], 1
+// CHECK2-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
[... 227 lines not shown ...]
// CHECK2-NEXT: store i32 [[TMP3]], i32* [[CONV1]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i64 [[TMP4]], [2 x %struct.S.0]* [[TMP1]], %struct.S.0* [[TMP5]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
-// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias [[__CONTEXT:%.*]]) #[[ATTR5]] {
+// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR5]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
// CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
// CHECK2-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 8
[... 62 lines not shown ...]
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
-// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
-// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
-// CHECK2-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
-// CHECK2-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
+// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
+// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
+// CHECK2-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
+// CHECK2-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK2: omp.inner.for.cond.cleanup:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
-// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
-// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
+// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
+// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
-// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group !11
-// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
-// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
-// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 [[IDXPROM]]
-// CHECK2-NEXT: store i32 [[TMP22]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !11
-// CHECK2-NEXT: [[TMP24:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP4]], align 8, !llvm.access.group !11
-// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
-// CHECK2-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP25]] to i64
-// CHECK2-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 [[IDXPROM6]]
-// CHECK2-NEXT: [[TMP26:%.*]] = bitcast %struct.S.0* [[ARRAYIDX7]] to i8*
-// CHECK2-NEXT: [[TMP27:%.*]] = bitcast %struct.S.0* [[TMP24]] to i8*
-// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i64 4, i1 false), !llvm.access.group !11
+// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !11
+// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
+// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
+// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC2]], i64 0, i64 [[IDXPROM]]
+// CHECK2-NEXT: store i32 [[TMP16]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !11
+// CHECK2-NEXT: [[TMP18:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP7]], align 8, !llvm.access.group !11
+// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
+// CHECK2-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP19]] to i64
+// CHECK2-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i64 0, i64 [[IDXPROM9]]
+// CHECK2-NEXT: [[TMP20:%.*]] = bitcast %struct.S.0* [[ARRAYIDX10]] to i8*
+// CHECK2-NEXT: [[TMP21:%.*]] = bitcast %struct.S.0* [[TMP18]] to i8*
+// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP20]], i8* align 4 [[TMP21]], i64 4, i1 false), !llvm.access.group !11
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
-// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
-// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP28]], 1
-// CHECK2-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
+// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
+// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], 1
+// CHECK2-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
[... 320 lines not shown ...]
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[SIVAR_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[SIVAR_CASTED]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[SIVAR_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32, [2 x %struct.S]*, %struct.S*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i32 [[TMP4]], [2 x %struct.S]* [[TMP1]], %struct.S* [[TMP2]], i32 [[TMP6]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
-// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR5:[0-9]+]] {
+// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i32 [[SIVAR:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 4
// CHECK3-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 4
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32, align 4
[... 58 lines not shown ...]
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
-// CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]]
-// CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
+// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
+// CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK3: omp.inner.for.cond.cleanup:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
-// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1
+// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i32 0, i32 [[TMP24]]
-// CHECK3-NEXT: store i32 [[TMP23]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 [[TMP25]]
-// CHECK3-NEXT: [[TMP26:%.*]] = bitcast %struct.S* [[ARRAYIDX4]] to i8*
-// CHECK3-NEXT: [[TMP27:%.*]] = bitcast %struct.S* [[VAR]] to i8*
-// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i32 4, i1 false), !llvm.access.group !6
-// CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP29]], [[TMP28]]
-// CHECK3-NEXT: store i32 [[ADD5]], i32* [[SIVAR]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR_ADDR]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC1]], i32 0, i32 [[TMP16]]
+// CHECK3-NEXT: store i32 [[TMP15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR2]], i32 0, i32 [[TMP17]]
+// CHECK3-NEXT: [[TMP18:%.*]] = bitcast %struct.S* [[ARRAYIDX7]] to i8*
+// CHECK3-NEXT: [[TMP19:%.*]] = bitcast %struct.S* [[VAR4]] to i8*
+// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i32 4, i1 false), !llvm.access.group !6
+// CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[SIVAR_ADDR]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP21]], [[TMP20]]
+// CHECK3-NEXT: store i32 [[ADD8]], i32* [[SIVAR_ADDR]], align 4, !llvm.access.group !6
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
-// CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
-// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP30]], 1
-// CHECK3-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
+// CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], 1
+// CHECK3-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
[... 224 lines not shown ...]
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[T_VAR_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i32 [[TMP4]], [2 x %struct.S.0]* [[TMP1]], %struct.S.0* [[TMP5]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3
-// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias [[__CONTEXT:%.*]]) #[[ATTR5]] {
+// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR5]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 4
// CHECK3-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 4
[... 61 lines not shown ...]
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
-// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
-// CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
-// CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]]
-// CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
+// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
+// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
+// CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
+// CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK3: omp.inner.for.cond.cleanup:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
-// CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
-// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1
+// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
+// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
-// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group !12
-// CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
-// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i32 0, i32 [[TMP23]]
-// CHECK3-NEXT: store i32 [[TMP22]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !12
-// CHECK3-NEXT: [[TMP24:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP4]], align 4, !llvm.access.group !12
-// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
-// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 [[TMP25]]
-// CHECK3-NEXT: [[TMP26:%.*]] = bitcast %struct.S.0* [[ARRAYIDX6]] to i8*
-// CHECK3-NEXT: [[TMP27:%.*]] = bitcast %struct.S.0* [[TMP24]] to i8*
-// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i32 4, i1 false), !llvm.access.group !12
+// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[T_VAR_ADDR]], align 4, !llvm.access.group !12
+// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
+// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC2]], i32 0, i32 [[TMP17]]
+// CHECK3-NEXT: store i32 [[TMP16]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !12
+// CHECK3-NEXT: [[TMP18:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP7]], align 4, !llvm.access.group !12
+// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
+// CHECK3-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i32 0, i32 [[TMP19]]
+// CHECK3-NEXT: [[TMP20:%.*]] = bitcast %struct.S.0* [[ARRAYIDX9]] to i8*
+// CHECK3-NEXT: [[TMP21:%.*]] = bitcast %struct.S.0* [[TMP18]] to i8*
+// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP20]], i8* align 4 [[TMP21]], i32 4, i1 false), !llvm.access.group !12
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
-// CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
-// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP28]], 1
-// CHECK3-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
+// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
+// CHECK3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP22]], 1
+// CHECK3-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]])
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[SIVAR_ADDR]], align 4 | // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[SIVAR_ADDR]], align 4 | ||||
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[SIVAR_CASTED]], align 4 | // CHECK4-NEXT: store i32 [[TMP5]], i32* [[SIVAR_CASTED]], align 4 | ||||
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[SIVAR_CASTED]], align 4 | // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[SIVAR_CASTED]], align 4 | ||||
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32, [2 x %struct.S]*, %struct.S*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i32 [[TMP4]], [2 x %struct.S]* [[TMP1]], %struct.S* [[TMP2]], i32 [[TMP6]]) | // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32, [2 x %struct.S]*, %struct.S*, i32)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i32 [[TMP4]], [2 x %struct.S]* [[TMP1]], %struct.S* [[TMP2]], i32 [[TMP6]]) | ||||
// CHECK4-NEXT: ret void | // CHECK4-NEXT: ret void | ||||
// | // | ||||
// | // | ||||
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined. | // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined. | ||||
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR5:[0-9]+]] { | // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S* nonnull align 4 dereferenceable(4) [[VAR:%.*]], i32 [[SIVAR:%.*]]) #[[ATTR5:[0-9]+]] { | ||||
// CHECK4-NEXT: entry: | // CHECK4-NEXT: entry: | ||||
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 | // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 | ||||
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 | // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 | ||||
// CHECK4-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4 | // CHECK4-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4 | ||||
// CHECK4-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32, align 4 | // CHECK4-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32, align 4 | ||||
// CHECK4-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 4 | // CHECK4-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 4 | ||||
// CHECK4-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 4 | // CHECK4-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S*, align 4 | ||||
// CHECK4-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32, align 4 | // CHECK4-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32, align 4 | ||||
// CHECK4-NEXT: br label [[COND_END]] | // CHECK4-NEXT: br label [[COND_END]] | ||||
// CHECK4: cond.end: | // CHECK4: cond.end: | ||||
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ] | // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ] | ||||
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 | // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 | ||||
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 | // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 | ||||
// CHECK4-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4 | // CHECK4-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV]], align 4 | ||||
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] | // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] | ||||
// CHECK4: omp.inner.for.cond: | // CHECK4: omp.inner.for.cond: | ||||
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] | // CHECK4-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] | ||||
// CHECK4-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] | // CHECK4-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] | ||||
// CHECK4: omp.inner.for.cond.cleanup: | // CHECK4: omp.inner.for.cond.cleanup: | ||||
// CHECK4-NEXT: br label [[OMP_INNER_FOR_END:%.*]] | // CHECK4-NEXT: br label [[OMP_INNER_FOR_END:%.*]] | ||||
// CHECK4: omp.inner.for.body: | // CHECK4: omp.inner.for.body: | ||||
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1 | // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1 | ||||
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] | // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] | ||||
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR_ADDR]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i32 0, i32 [[TMP24]] | // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC1]], i32 0, i32 [[TMP16]] | ||||
// CHECK4-NEXT: store i32 [[TMP23]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: store i32 [[TMP15]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 [[TMP25]] | // CHECK4-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR2]], i32 0, i32 [[TMP17]] | ||||
// CHECK4-NEXT: [[TMP26:%.*]] = bitcast %struct.S* [[ARRAYIDX4]] to i8* | // CHECK4-NEXT: [[TMP18:%.*]] = bitcast %struct.S* [[ARRAYIDX7]] to i8* | ||||
// CHECK4-NEXT: [[TMP27:%.*]] = bitcast %struct.S* [[VAR]] to i8* | // CHECK4-NEXT: [[TMP19:%.*]] = bitcast %struct.S* [[VAR4]] to i8* | ||||
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i32 4, i1 false), !llvm.access.group !6 | // CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP18]], i8* align 4 [[TMP19]], i32 4, i1 false), !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[TMP28:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[TMP29:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[SIVAR_ADDR]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP29]], [[TMP28]] | // CHECK4-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP21]], [[TMP20]] | ||||
// CHECK4-NEXT: store i32 [[ADD5]], i32* [[SIVAR]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: store i32 [[ADD8]], i32* [[SIVAR_ADDR]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] | // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] | ||||
// CHECK4: omp.body.continue: | // CHECK4: omp.body.continue: | ||||
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] | // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] | ||||
// CHECK4: omp.inner.for.inc: | // CHECK4: omp.inner.for.inc: | ||||
// CHECK4-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP30]], 1 | // CHECK4-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], 1 | ||||
// CHECK4-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 | // CHECK4-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 | ||||
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] | // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] | ||||
// CHECK4: omp.inner.for.end: | // CHECK4: omp.inner.for.end: | ||||
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] | // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] | ||||
// CHECK4: omp.loop.exit: | // CHECK4: omp.loop.exit: | ||||
// CHECK4-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 | // CHECK4-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 | ||||
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4 | // CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4 | ||||
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]]) | // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]]) | ||||
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 | // CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 | ||||
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[T_VAR_CASTED]], align 4 | // CHECK4-NEXT: store i32 [[TMP3]], i32* [[T_VAR_CASTED]], align 4 | ||||
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4 | // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4 | ||||
// CHECK4-NEXT: [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4 | // CHECK4-NEXT: [[TMP5:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP]], align 4 | ||||
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i32 [[TMP4]], [2 x %struct.S.0]* [[TMP1]], %struct.S.0* [[TMP5]]) | // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i32, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), [2 x i32]* [[TMP0]], i32 [[TMP4]], [2 x %struct.S.0]* [[TMP1]], %struct.S.0* [[TMP5]]) | ||||
// CHECK4-NEXT: ret void | // CHECK4-NEXT: ret void | ||||
// | // | ||||
// | // | ||||
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3 | // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3 | ||||
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon.1* noalias [[__CONTEXT:%.*]]) #[[ATTR5]] { | // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [2 x i32]* nonnull align 4 dereferenceable(8) [[VEC:%.*]], i32 [[T_VAR:%.*]], [2 x %struct.S.0]* nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR5]] { | ||||
// CHECK4-NEXT: entry: | // CHECK4-NEXT: entry: | ||||
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 | // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 | ||||
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 | // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 | ||||
// CHECK4-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4 | // CHECK4-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 4 | ||||
// CHECK4-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32, align 4 | // CHECK4-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32, align 4 | ||||
// CHECK4-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 4 | // CHECK4-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 4 | ||||
// CHECK4-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 4 | // CHECK4-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 4 | ||||
// CHECK4-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 4 | // CHECK4-NEXT: [[TMP:%.*]] = alloca %struct.S.0*, align 4 | ||||
// CHECK4-NEXT: br label [[COND_END]] | // CHECK4-NEXT: br label [[COND_END]] | ||||
// CHECK4: cond.end: | // CHECK4: cond.end: | ||||
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] | // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] | ||||
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 | // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 | ||||
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 | // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 | ||||
// CHECK4-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4 | // CHECK4-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV]], align 4 | ||||
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] | // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] | ||||
// CHECK4: omp.inner.for.cond: | // CHECK4: omp.inner.for.cond: | ||||
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]] | // CHECK4-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] | ||||
// CHECK4-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] | // CHECK4-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] | ||||
// CHECK4: omp.inner.for.cond.cleanup: | // CHECK4: omp.inner.for.cond.cleanup: | ||||
// CHECK4-NEXT: br label [[OMP_INNER_FOR_END:%.*]] | // CHECK4-NEXT: br label [[OMP_INNER_FOR_END:%.*]] | ||||
// CHECK4: omp.inner.for.body: | // CHECK4: omp.inner.for.body: | ||||
// CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP21]], 1 | // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 | ||||
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] | // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] | ||||
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[T_VAR_ADDR]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i32 0, i32 [[TMP23]] | // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC2]], i32 0, i32 [[TMP17]] | ||||
// CHECK4-NEXT: store i32 [[TMP22]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: store i32 [[TMP16]], i32* [[ARRAYIDX]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: [[TMP24:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP4]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: [[TMP18:%.*]] = load %struct.S.0*, %struct.S.0** [[_TMP7]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 [[TMP25]] | // CHECK4-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i32 0, i32 [[TMP19]] | ||||
// CHECK4-NEXT: [[TMP26:%.*]] = bitcast %struct.S.0* [[ARRAYIDX6]] to i8* | // CHECK4-NEXT: [[TMP20:%.*]] = bitcast %struct.S.0* [[ARRAYIDX9]] to i8* | ||||
// CHECK4-NEXT: [[TMP27:%.*]] = bitcast %struct.S.0* [[TMP24]] to i8* | // CHECK4-NEXT: [[TMP21:%.*]] = bitcast %struct.S.0* [[TMP18]] to i8* | ||||
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP26]], i8* align 4 [[TMP27]], i32 4, i1 false), !llvm.access.group !12 | // CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP20]], i8* align 4 [[TMP21]], i32 4, i1 false), !llvm.access.group !12 | ||||
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] | // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] | ||||
// CHECK4: omp.body.continue: | // CHECK4: omp.body.continue: | ||||
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] | // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] | ||||
// CHECK4: omp.inner.for.inc: | // CHECK4: omp.inner.for.inc: | ||||
// CHECK4-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP28]], 1 | // CHECK4-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP22]], 1 | ||||
// CHECK4-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 | // CHECK4-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 | ||||
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] | // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] | ||||
// CHECK4: omp.inner.for.end: | // CHECK4: omp.inner.for.end: | ||||
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] | // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] | ||||
// CHECK4: omp.loop.exit: | // CHECK4: omp.loop.exit: | ||||
// CHECK4-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 | // CHECK4-NEXT: [[TMP23:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 | ||||
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4 | // CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[TMP23]], align 4 | ||||
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]]) | // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP24]]) | ||||
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 | // CHECK4-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 | ||||
// CHECK9-NEXT: [[CONV5:%.*]] = bitcast i64* [[SIVAR_CASTED]] to i32* | // CHECK9-NEXT: [[CONV5:%.*]] = bitcast i64* [[SIVAR_CASTED]] to i32* | ||||
// CHECK9-NEXT: store i32 [[TMP5]], i32* [[CONV5]], align 4 | // CHECK9-NEXT: store i32 [[TMP5]], i32* [[CONV5]], align 4 | ||||
// CHECK9-NEXT: [[TMP6:%.*]] = load i64, i64* [[SIVAR_CASTED]], align 8 | // CHECK9-NEXT: [[TMP6:%.*]] = load i64, i64* [[SIVAR_CASTED]], align 8 | ||||
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP4]], i64 [[TMP6]]) | // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP4]], i64 [[TMP6]]) | ||||
// CHECK9-NEXT: ret void | // CHECK9-NEXT: ret void | ||||
// | // | ||||
// | // | ||||
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined. | // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined. | ||||
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR6:[0-9]+]] { | // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[G:%.*]], i64 [[G1:%.*]], i64 [[SIVAR:%.*]]) #[[ATTR6:[0-9]+]] { | ||||
// CHECK9-NEXT: entry: | // CHECK9-NEXT: entry: | ||||
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 | // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 | ||||
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 | // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 | ||||
// CHECK9-NEXT: [[G_ADDR:%.*]] = alloca i64, align 8 | // CHECK9-NEXT: [[G_ADDR:%.*]] = alloca i64, align 8 | ||||
// CHECK9-NEXT: [[G1_ADDR:%.*]] = alloca i64, align 8 | // CHECK9-NEXT: [[G1_ADDR:%.*]] = alloca i64, align 8 | ||||
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8 | // CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8 | ||||
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32*, align 8 | // CHECK9-NEXT: [[TMP:%.*]] = alloca i32*, align 8 | ||||
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 | // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 | ||||
// CHECK9-NEXT: br label [[COND_END]] | // CHECK9-NEXT: br label [[COND_END]] | ||||
// CHECK9: cond.end: | // CHECK9: cond.end: | ||||
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] | // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] | ||||
// CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 | // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 | ||||
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 | // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 | ||||
// CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 | // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 | ||||
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] | // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] | ||||
// CHECK9: omp.inner.for.cond: | // CHECK9: omp.inner.for.cond: | ||||
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | ||||
// CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4 | // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4 | ||||
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] | // CHECK9-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] | ||||
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] | // CHECK9-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] | ||||
// CHECK9: omp.inner.for.body: | // CHECK9: omp.inner.for.body: | ||||
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | ||||
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1 | // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 | ||||
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] | // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] | ||||
// CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !4 | // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !4 | ||||
// CHECK9-NEXT: store i32 1, i32* [[G]], align 4, !llvm.access.group !4 | // CHECK9-NEXT: store i32 1, i32* [[CONV]], align 8, !llvm.access.group !4 | ||||
// CHECK9-NEXT: [[TMP15:%.*]] = load i32*, i32** [[TMP]], align 8, !llvm.access.group !4 | // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP]], align 8, !llvm.access.group !4 | ||||
// CHECK9-NEXT: store volatile i32 1, i32* [[TMP15]], align 4, !llvm.access.group !4 | // CHECK9-NEXT: store volatile i32 1, i32* [[TMP8]], align 4, !llvm.access.group !4 | ||||
// CHECK9-NEXT: store i32 2, i32* [[SIVAR]], align 4, !llvm.access.group !4 | // CHECK9-NEXT: store i32 2, i32* [[CONV2]], align 8, !llvm.access.group !4 | ||||
// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0 | // CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0 | ||||
// CHECK9-NEXT: store i32* [[G]], i32** [[TMP16]], align 8, !llvm.access.group !4 | // CHECK9-NEXT: store i32* [[CONV]], i32** [[TMP9]], align 8, !llvm.access.group !4 | ||||
// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1 | // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1 | ||||
// CHECK9-NEXT: [[TMP18:%.*]] = load i32*, i32** [[TMP]], align 8, !llvm.access.group !4 | // CHECK9-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP]], align 8, !llvm.access.group !4 | ||||
// CHECK9-NEXT: store i32* [[TMP18]], i32** [[TMP17]], align 8, !llvm.access.group !4 | // CHECK9-NEXT: store i32* [[TMP11]], i32** [[TMP10]], align 8, !llvm.access.group !4 | ||||
// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2 | // CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2 | ||||
// CHECK9-NEXT: store i32* [[SIVAR]], i32** [[TMP19]], align 8, !llvm.access.group !4 | // CHECK9-NEXT: store i32* [[CONV2]], i32** [[TMP12]], align 8, !llvm.access.group !4 | ||||
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(24) [[REF_TMP]]), !llvm.access.group !4 | // CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(24) [[REF_TMP]]), !llvm.access.group !4 | ||||
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] | // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] | ||||
// CHECK9: omp.body.continue: | // CHECK9: omp.body.continue: | ||||
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] | // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] | ||||
// CHECK9: omp.inner.for.inc: | // CHECK9: omp.inner.for.inc: | ||||
// CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | ||||
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], 1 | // CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP13]], 1 | ||||
// CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | // CHECK9-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | ||||
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] | // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] | ||||
// CHECK9: omp.inner.for.end: | // CHECK9: omp.inner.for.end: | ||||
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] | // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] | ||||
// CHECK9: omp.loop.exit: | // CHECK9: omp.loop.exit: | ||||
// CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) | // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) | ||||
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 | // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 | ||||
// CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 | // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 | ||||
// CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] | // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] | ||||
// CHECK10-NEXT: [[CONV5:%.*]] = bitcast i64* [[SIVAR_CASTED]] to i32* | // CHECK10-NEXT: [[CONV5:%.*]] = bitcast i64* [[SIVAR_CASTED]] to i32* | ||||
// CHECK10-NEXT: store i32 [[TMP5]], i32* [[CONV5]], align 4 | // CHECK10-NEXT: store i32 [[TMP5]], i32* [[CONV5]], align 4 | ||||
// CHECK10-NEXT: [[TMP6:%.*]] = load i64, i64* [[SIVAR_CASTED]], align 8 | // CHECK10-NEXT: [[TMP6:%.*]] = load i64, i64* [[SIVAR_CASTED]], align 8 | ||||
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP4]], i64 [[TMP6]]) | // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP4]], i64 [[TMP6]]) | ||||
// CHECK10-NEXT: ret void | // CHECK10-NEXT: ret void | ||||
// | // | ||||
// | // | ||||
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined. | // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined. | ||||
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.anon* noalias [[__CONTEXT:%.*]]) #[[ATTR6:[0-9]+]] { | // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[G:%.*]], i64 [[G1:%.*]], i64 [[SIVAR:%.*]]) #[[ATTR6:[0-9]+]] { | ||||
// CHECK10-NEXT: entry: | // CHECK10-NEXT: entry: | ||||
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 | // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 | ||||
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 | // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 | ||||
// CHECK10-NEXT: [[G_ADDR:%.*]] = alloca i64, align 8 | // CHECK10-NEXT: [[G_ADDR:%.*]] = alloca i64, align 8 | ||||
// CHECK10-NEXT: [[G1_ADDR:%.*]] = alloca i64, align 8 | // CHECK10-NEXT: [[G1_ADDR:%.*]] = alloca i64, align 8 | ||||
// CHECK10-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8 | // CHECK10-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8 | ||||
// CHECK10-NEXT: [[TMP:%.*]] = alloca i32*, align 8 | // CHECK10-NEXT: [[TMP:%.*]] = alloca i32*, align 8 | ||||
// CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 | // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 | ||||
// CHECK10-NEXT: br label [[COND_END]] | // CHECK10-NEXT: br label [[COND_END]] | ||||
// CHECK10: cond.end: | // CHECK10: cond.end: | ||||
// CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] | // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] | ||||
// CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 | // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 | ||||
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 | // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 | ||||
// CHECK10-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 | // CHECK10-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 | ||||
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] | // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] | ||||
// CHECK10: omp.inner.for.cond: | // CHECK10: omp.inner.for.cond: | ||||
// CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | ||||
// CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4 | // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4 | ||||
// CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] | // CHECK10-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] | ||||
// CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] | // CHECK10-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] | ||||
// CHECK10: omp.inner.for.body: | // CHECK10: omp.inner.for.body: | ||||
// CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | ||||
// CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1 | // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 | ||||
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] | // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] | ||||
// CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !4 | // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !4 | ||||
// CHECK10-NEXT: store i32 1, i32* [[G]], align 4, !llvm.access.group !4 | // CHECK10-NEXT: store i32 1, i32* [[CONV]], align 8, !llvm.access.group !4 | ||||
// CHECK10-NEXT: [[TMP15:%.*]] = load i32*, i32** [[TMP]], align 8, !llvm.access.group !4 | // CHECK10-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP]], align 8, !llvm.access.group !4 | ||||
// CHECK10-NEXT: store volatile i32 1, i32* [[TMP15]], align 4, !llvm.access.group !4 | // CHECK10-NEXT: store volatile i32 1, i32* [[TMP8]], align 4, !llvm.access.group !4 | ||||
// CHECK10-NEXT: store i32 2, i32* [[SIVAR]], align 4, !llvm.access.group !4 | // CHECK10-NEXT: store i32 2, i32* [[CONV2]], align 8, !llvm.access.group !4 | ||||
// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0 | // CHECK10-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0 | ||||
// CHECK10-NEXT: store i32* [[G]], i32** [[TMP16]], align 8, !llvm.access.group !4 | // CHECK10-NEXT: store i32* [[CONV]], i32** [[TMP9]], align 8, !llvm.access.group !4 | ||||
// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1 | // CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1 | ||||
// CHECK10-NEXT: [[TMP18:%.*]] = load i32*, i32** [[TMP]], align 8, !llvm.access.group !4 | // CHECK10-NEXT: [[TMP11:%.*]] = load i32*, i32** [[TMP]], align 8, !llvm.access.group !4 | ||||
// CHECK10-NEXT: store i32* [[TMP18]], i32** [[TMP17]], align 8, !llvm.access.group !4 | // CHECK10-NEXT: store i32* [[TMP11]], i32** [[TMP10]], align 8, !llvm.access.group !4 | ||||
// CHECK10-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2 | // CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 2 | ||||
// CHECK10-NEXT: store i32* [[SIVAR]], i32** [[TMP19]], align 8, !llvm.access.group !4 | // CHECK10-NEXT: store i32* [[CONV2]], i32** [[TMP12]], align 8, !llvm.access.group !4 | ||||
// CHECK10-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(24) [[REF_TMP]]), !llvm.access.group !4 | // CHECK10-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* nonnull align 8 dereferenceable(24) [[REF_TMP]]), !llvm.access.group !4 | ||||
// CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] | // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] | ||||
// CHECK10: omp.body.continue: | // CHECK10: omp.body.continue: | ||||
// CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] | // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] | ||||
// CHECK10: omp.inner.for.inc: | // CHECK10: omp.inner.for.inc: | ||||
// CHECK10-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | ||||
// CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], 1 | // CHECK10-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP13]], 1 | ||||
// CHECK10-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | // CHECK10-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4 | ||||
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] | // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] | ||||
// CHECK10: omp.inner.for.end: | // CHECK10: omp.inner.for.end: | ||||
// CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] | // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] | ||||
// CHECK10: omp.loop.exit: | // CHECK10: omp.loop.exit: | ||||
// CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) | // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) | ||||
// CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 | // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 | ||||
// CHECK10-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 | // CHECK10-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 | ||||
// CHECK10-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] | // CHECK10-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] | ||||