diff --git a/clang/test/OpenMP/nvptx_parallel_codegen.cpp b/clang/test/OpenMP/nvptx_parallel_codegen.cpp
--- a/clang/test/OpenMP/nvptx_parallel_codegen.cpp
+++ b/clang/test/OpenMP/nvptx_parallel_codegen.cpp
@@ -1,12 +1,12 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
 // Test target codegen - host bc file has to be created first.
 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
-// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK1
-// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns -fopenmp-cuda-parallel-target-regions | FileCheck %s --check-prefix=CHECK2
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefixes=CHECK_ALL,CHECK__64,CHECK__R1
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -aux-triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns -fopenmp-cuda-parallel-target-regions | FileCheck %s --check-prefixes=CHECK_ALL,CHECK__64,CHECK__R2
 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
-// RUN: %clang_cc1 -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns -disable-O0-optnone | FileCheck %s --check-prefix=CHECK3
-// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns -fopenmp-cuda-parallel-target-regions | FileCheck %s --check-prefix=CHECK4
-// RUN: %clang_cc1 -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns -fopenmp-cuda-parallel-target-regions | FileCheck %s --check-prefix=CHECK5
+// RUN: %clang_cc1 -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns -disable-O0-optnone | FileCheck %s --check-prefixes=CHECK_ALL,CHECK__32,CHECK__R3
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns -fopenmp-cuda-parallel-target-regions | FileCheck %s --check-prefixes=CHECK_ALL,CHECK__32,CHECK_R45,CHECK__R4
+// RUN: %clang_cc1 -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -aux-triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns -fopenmp-cuda-parallel-target-regions | FileCheck %s --check-prefixes=CHECK_ALL,CHECK__32,CHECK_R45,CHECK__R5
 // expected-no-diagnostics
 #ifndef HEADER
 #define HEADER
@@ -76,1385 +76,672 @@
 }
 #endif
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
-// CHECK1-SAME: (i64 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
-// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
-// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS2:%.*]] = alloca [0 x i8*], align 8
-// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
-// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
-// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 false, i1 true, i1 true)
-// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
-// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK1: user_code.entry:
-// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK1-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP2]], i64 0)
-// CHECK1-NEXT: [[TMP3:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
-// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i64 0)
-// CHECK1-NEXT: [[TMP4:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS2]] to i8**
-// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** [[TMP4]], i64 0)
-// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[CONV]], align 8
-// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
-// CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
-// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK1-NEXT: ret void
-// CHECK1: worker.exit:
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__
-// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK1-NEXT: store i32 42, i32* [[A]], align 4
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
-// CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
-// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2:[0-9]+]]
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1
-// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK1-NEXT: store i32 43, i32* [[A]], align 4
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
-// CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
-// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK1-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__2
-// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK1-NEXT: store i32 44, i32* [[A]], align 4
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
-// CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
-// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK1-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46
-// CHECK1-SAME: (i64 [[N:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
-// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
-// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
-// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
-// CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
-// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
-// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
-// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
-// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
-// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_ADDR]] to i32*
-// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
-// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
-// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
-// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK1: user_code.entry:
-// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
-// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1000
-// CHECK1-NEXT: [[TMP4:%.*]] = zext i1 [[CMP]] to i32
-// CHECK1-NEXT: [[TMP5:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP4]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** [[TMP5]], i64 0)
-// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
-// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
-// CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV1]], align 8
-// CHECK1-NEXT: [[TMP7:%.*]] = load i16, i16* [[CONV2]], align 8
-// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP7]] to i32
-// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
-// CHECK1-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
-// CHECK1-NEXT: store i16 [[CONV5]], i16* [[CONV2]], align 8
-// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
-// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK1-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
-// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK1-NEXT: ret void
-// CHECK1: worker.exit:
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__3
-// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK1-NEXT: store i32 45, i32* [[A]], align 4
-// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
-// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
-// CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
-// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK1-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l58
-// CHECK1-SAME: (i64 [[A:%.*]]) #[[ATTR0]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
-// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
-// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
-// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
-// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
-// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK1: user_code.entry:
-// CHECK1-NEXT: [[TMP1:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
-// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* @"_openmp_static_kernel$size", align 8
-// CHECK1-NEXT: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i64 [[TMP2]], i16 [[TMP1]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**))
-// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 8
-// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i64 0
-// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct._globalized_locals_ty*
-// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[CONV]], align 8
-// CHECK1-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP5]], i32 0, i32 0
-// CHECK1-NEXT: store i32 [[TMP6]], i32* [[A1]], align 4
-// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
-// CHECK1-NEXT: [[TMP9:%.*]] = bitcast i32* [[A1]] to i8*
-// CHECK1-NEXT: store i8* [[TMP9]], i8** [[TMP8]], align 8
-// CHECK1-NEXT: [[TMP10:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP10]], i64 1)
-// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[A1]], align 4
-// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1
-// CHECK1-NEXT: store i32 [[INC]], i32* [[A1]], align 4
-// CHECK1-NEXT: [[TMP12:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
-// CHECK1-NEXT: call void @__kmpc_restore_team_static_memory(i16 0, i16 [[TMP12]])
-// CHECK1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK1-NEXT: ret void
-// CHECK1: worker.exit:
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__4
-// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR0]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
-// CHECK1-NEXT: [[CRITICAL_COUNTER:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK1-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
-// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
-// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_warp_active_thread_mask()
-// CHECK1-NEXT: [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
-// CHECK1-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
-// CHECK1-NEXT: store i32 0, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK1-NEXT: br label [[OMP_CRITICAL_LOOP:%.*]]
-// CHECK1: omp.critical.loop:
-// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], [[NVPTX_NUM_THREADS]]
-// CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]]
-// CHECK1: omp.critical.test:
-// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK1-NEXT: [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID]], [[TMP4]]
-// CHECK1-NEXT: br i1 [[TMP5]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]]
-// CHECK1: omp.critical.body:
-// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
-// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var")
-// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
-// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK1-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
-// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var")
-// CHECK1-NEXT: br label [[OMP_CRITICAL_SYNC]]
-// CHECK1: omp.critical.sync:
-// CHECK1-NEXT: call void @__kmpc_syncwarp(i32 [[TMP1]])
-// CHECK1-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP4]], 1
-// CHECK1-NEXT: store i32 [[TMP9]], i32* [[CRITICAL_COUNTER]], align 4
-// CHECK1-NEXT: br label [[OMP_CRITICAL_LOOP]]
-// CHECK1: omp.critical.exit:
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__4_wrapper
-// CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK1-NEXT: entry:
-// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
-// CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
-// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
-// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
-// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
-// CHECK1-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR2]]
-// CHECK1-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
-// CHECK2-SAME: (i64 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
-// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
-// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS2:%.*]] = alloca [0 x i8*], align 8
-// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
-// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
-// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 false, i1 true, i1 true)
-// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
-// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK2: user_code.entry:
-// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK2-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP2]], i64 0)
-// CHECK2-NEXT: [[TMP3:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
-// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i64 0)
-// CHECK2-NEXT: [[TMP4:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS2]] to i8**
-// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** [[TMP4]], i64 0)
-// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[CONV]], align 8
-// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
-// CHECK2-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
-// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK2-NEXT: ret void
-// CHECK2: worker.exit:
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__
-// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK2-NEXT: store i32 42, i32* [[A]], align 4
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
-// CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
-// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2:[0-9]+]]
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1
-// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK2-NEXT: store i32 43, i32* [[A]], align 4
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
-// CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
-// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK2-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__2
-// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK2-NEXT: store i32 44, i32* [[A]], align 4
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
-// CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
-// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK2-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46
-// CHECK2-SAME: (i64 [[N:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
-// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
-// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
-// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
-// CHECK2-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
-// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
-// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
-// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
-// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
-// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_ADDR]] to i32*
-// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
-// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
-// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
-// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
-// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK2: user_code.entry:
-// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
-// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1000
-// CHECK2-NEXT: [[TMP4:%.*]] = zext i1 [[CMP]] to i32
-// CHECK2-NEXT: [[TMP5:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP4]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** [[TMP5]], i64 0)
-// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
-// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
-// CHECK2-NEXT: store i32 [[ADD]], i32* [[CONV1]], align 8
-// CHECK2-NEXT: [[TMP7:%.*]] = load i16, i16* [[CONV2]], align 8
-// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP7]] to i32
-// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
-// CHECK2-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
-// CHECK2-NEXT: store i16 [[CONV5]], i16* [[CONV2]], align 8
-// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
-// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK2-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
-// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK2-NEXT: ret void
-// CHECK2: worker.exit:
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__3
-// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK2-NEXT: store i32 45, i32* [[A]], align 4
-// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
-// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
-// CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
-// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK2-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l58
-// CHECK2-SAME: (i64 [[A:%.*]]) #[[ATTR0]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
-// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
-// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
-// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
-// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
-// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK2: user_code.entry:
-// CHECK2-NEXT: [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i64 4, i16 1)
-// CHECK2-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty*
-// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
-// CHECK2-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0
-// CHECK2-NEXT: store i32 [[TMP3]], i32* [[A1]], align 4
-// CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
-// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i32* [[A1]] to i8*
-// CHECK2-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
-// CHECK2-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP7]], i64 1)
-// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[A1]], align 4
-// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK2-NEXT: store i32 [[INC]], i32* [[A1]], align 4
-// CHECK2-NEXT: call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]])
-// CHECK2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK2-NEXT: ret void
-// CHECK2: worker.exit:
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__4
-// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR0]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
-// CHECK2-NEXT: [[CRITICAL_COUNTER:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
-// CHECK2-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
-// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
-// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_warp_active_thread_mask()
-// CHECK2-NEXT: [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
-// CHECK2-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
-// CHECK2-NEXT: store i32 0, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK2-NEXT: br label [[OMP_CRITICAL_LOOP:%.*]]
-// CHECK2: omp.critical.loop:
-// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK2-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], [[NVPTX_NUM_THREADS]]
-// CHECK2-NEXT: br i1 [[TMP3]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]]
-// CHECK2: omp.critical.test:
-// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK2-NEXT: [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID]], [[TMP4]]
-// CHECK2-NEXT: br i1 [[TMP5]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]]
-// CHECK2: omp.critical.body:
-// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
-// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
-// CHECK2-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var")
-// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
-// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK2-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
-// CHECK2-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var")
-// CHECK2-NEXT: br label [[OMP_CRITICAL_SYNC]]
-// CHECK2: omp.critical.sync:
-// CHECK2-NEXT: call void @__kmpc_syncwarp(i32 [[TMP1]])
-// CHECK2-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP4]], 1
-// CHECK2-NEXT: store i32 [[TMP9]], i32* [[CRITICAL_COUNTER]], align 4
-// CHECK2-NEXT: br label [[OMP_CRITICAL_LOOP]]
-// CHECK2: omp.critical.exit:
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__4_wrapper
-// CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK2-NEXT: entry:
-// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
-// CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK2-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
-// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
-// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
-// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
-// CHECK2-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR2]]
-// CHECK2-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
-// CHECK3-SAME: (i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
-// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 4
-// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS2:%.*]] = alloca [0 x i8*], align 4
-// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
-// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 false, i1 true, i1 true)
-// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
-// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK3: user_code.entry:
-// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK3-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP2]], i32 0)
-// CHECK3-NEXT: [[TMP3:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
-// CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i32 0)
-// CHECK3-NEXT: [[TMP4:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS2]] to i8**
-// CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** [[TMP4]], i32 0)
-// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_ADDR]], align 4
-// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
-// CHECK3-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
-// CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK3-NEXT: ret void
-// CHECK3: worker.exit:
-// CHECK3-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__
-// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK3-NEXT: store i32 42, i32* [[A]], align 4
-// CHECK3-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
-// CHECK3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK3-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK3-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK3-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR1:[0-9]+]]
-// CHECK3-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__1
-// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK3-NEXT: store i32 43, i32* [[A]], align 4
-// CHECK3-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
-// CHECK3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK3-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK3-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK3-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR1]]
-// CHECK3-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__2
-// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK3-NEXT: store i32 44, i32* [[A]], align 4
-// CHECK3-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
-// CHECK3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK3-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK3-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK3-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR1]]
-// CHECK3-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46
-// CHECK3-SAME: (i32 [[N:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
-// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
-// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
-// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
-// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
-// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
-// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
-// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
-// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
-// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
-// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK3: user_code.entry:
-// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
-// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1000
-// CHECK3-NEXT: [[TMP4:%.*]] = zext i1 [[CMP]] to i32
-// CHECK3-NEXT: [[TMP5:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP4]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** [[TMP5]], i32 0)
-// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ADDR]], align 4
-// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
-// CHECK3-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
-// CHECK3-NEXT: [[TMP7:%.*]] = load i16, i16* [[CONV]], align 4
-// CHECK3-NEXT: [[CONV1:%.*]] = sext i16 [[TMP7]] to i32
-// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
-// CHECK3-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
-// CHECK3-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
-// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
-// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK3-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
-// CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK3-NEXT: ret void
-// CHECK3: worker.exit:
-// CHECK3-NEXT: ret void
-//
-//
-// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__3
-// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK3-NEXT: entry:
-// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 -// CHECK3-NEXT: store i32 45, i32* [[A]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]]) -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper -// CHECK3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2 -// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4 -// CHECK3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4 -// CHECK3-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2 -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4 -// CHECK3-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]]) -// CHECK3-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR1]] -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l58 -// CHECK3-SAME: (i32 [[A:%.*]]) #[[ATTR0]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4 -// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true) -// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 -// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] -// CHECK3: user_code.entry: -// CHECK3-NEXT: [[TMP1:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* @"_openmp_static_kernel$size", align 4 -// CHECK3-NEXT: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i32 [[TMP2]], i16 [[TMP1]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**)) -// CHECK3-NEXT: [[TMP3:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i32 0 -// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct._globalized_locals_ty* -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ADDR]], align 4 -// CHECK3-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP5]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP6]], i32* [[A1]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP9:%.*]] = bitcast i32* [[A1]] to i8* -// CHECK3-NEXT: store i8* [[TMP9]], i8** [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** -// CHECK3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, 
i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP10]], i32 1) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[A1]], align 4 -// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[INC]], i32* [[A1]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2 -// CHECK3-NEXT: call void @__kmpc_restore_team_static_memory(i16 0, i16 [[TMP12]]) -// CHECK3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true) -// CHECK3-NEXT: ret void -// CHECK3: worker.exit: -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__4 -// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR0]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 -// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 -// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 -// CHECK3-NEXT: [[CRITICAL_COUNTER:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 -// CHECK3-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_warp_active_thread_mask() -// CHECK3-NEXT: [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() -// CHECK3-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() -// CHECK3-NEXT: store i32 0, i32* [[CRITICAL_COUNTER]], align 4 -// CHECK3-NEXT: br label [[OMP_CRITICAL_LOOP:%.*]] -// CHECK3: omp.critical.loop: -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], [[NVPTX_NUM_THREADS]] -// CHECK3-NEXT: br i1 [[TMP3]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]] -// CHECK3: omp.critical.test: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID]], [[TMP4]] -// CHECK3-NEXT: br i1 [[TMP5]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]] -// CHECK3: omp.critical.body: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4 -// CHECK3-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var") -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4 -// CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK3-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4 -// CHECK3-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var") -// CHECK3-NEXT: br label [[OMP_CRITICAL_SYNC]] -// CHECK3: omp.critical.sync: -// CHECK3-NEXT: call void @__kmpc_syncwarp(i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[TMP9]], i32* [[CRITICAL_COUNTER]], align 4 -// CHECK3-NEXT: br label [[OMP_CRITICAL_LOOP]] -// CHECK3: omp.critical.exit: -// CHECK3-NEXT: ret void -// -// -// CHECK3-LABEL: define {{[^@]+}}@__omp_outlined__4_wrapper -// CHECK3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] { -// CHECK3-NEXT: entry: -// CHECK3-NEXT: [[DOTADDR:%.*]] 
= alloca i16, align 2 -// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4 -// CHECK3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4 -// CHECK3-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2 -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4 -// CHECK3-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]]) -// CHECK3-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 0 -// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32** -// CHECK3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 4 -// CHECK3-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR1]] -// CHECK3-NEXT: ret void -// -// -// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29 -// CHECK4-SAME: (i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK4-NEXT: entry: -// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 -// CHECK4-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4 -// CHECK4-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 4 -// CHECK4-NEXT: [[CAPTURED_VARS_ADDRS2:%.*]] = alloca [0 x i8*], align 4 -// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 -// CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 false, i1 true, i1 true) -// CHECK4-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 -// CHECK4-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] -// CHECK4: user_code.entry: -// CHECK4-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) -// CHECK4-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** -// CHECK4-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP2]], i32 0) -// CHECK4-NEXT: [[TMP3:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8** -// CHECK4-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i32 0) -// CHECK4-NEXT: [[TMP4:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS2]] to i8** -// CHECK4-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** [[TMP4]], i32 0) -// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_ADDR]], align 4 -// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK4-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4 -// CHECK4-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true) -// CHECK4-NEXT: ret void -// CHECK4: worker.exit: -// CHECK4-NEXT: ret void -// -// -// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__ -// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { -// CHECK4-NEXT: entry: -// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 -// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 -// CHECK4-NEXT: 
-// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK4-NEXT: store i32 42, i32* [[A]], align 4
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
-// CHECK4-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK4-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK4-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK4-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK4-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2:[0-9]+]]
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__1
-// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK4-NEXT: store i32 43, i32* [[A]], align 4
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
-// CHECK4-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK4-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK4-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK4-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK4-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__2
-// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK4-NEXT: store i32 44, i32* [[A]], align 4
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
-// CHECK4-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK4-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK4-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK4-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK4-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46
-// CHECK4-SAME: (i32 [[N:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
-// CHECK4-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
-// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
-// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
-// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
-// CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
-// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
-// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
-// CHECK4-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
-// CHECK4-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
-// CHECK4-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK4: user_code.entry:
-// CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
-// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1000
-// CHECK4-NEXT: [[TMP4:%.*]] = zext i1 [[CMP]] to i32
-// CHECK4-NEXT: [[TMP5:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK4-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP4]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** [[TMP5]], i32 0)
-// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ADDR]], align 4
-// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
-// CHECK4-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
-// CHECK4-NEXT: [[TMP7:%.*]] = load i16, i16* [[CONV]], align 4
-// CHECK4-NEXT: [[CONV1:%.*]] = sext i16 [[TMP7]] to i32
-// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
-// CHECK4-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
-// CHECK4-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
-// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
-// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK4-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
-// CHECK4-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK4-NEXT: ret void
-// CHECK4: worker.exit:
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__3
-// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK4-NEXT: store i32 45, i32* [[A]], align 4
-// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
-// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
-// CHECK4-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK4-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK4-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK4-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK4-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l58
-// CHECK4-SAME: (i32 [[A:%.*]]) #[[ATTR0]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4
-// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
-// CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
-// CHECK4-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
-// CHECK4-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK4: user_code.entry:
-// CHECK4-NEXT: [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i32 4, i16 1)
-// CHECK4-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty*
-// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4
-// CHECK4-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0
-// CHECK4-NEXT: store i32 [[TMP3]], i32* [[A1]], align 4
-// CHECK4-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
-// CHECK4-NEXT: [[TMP6:%.*]] = bitcast i32* [[A1]] to i8*
-// CHECK4-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4
-// CHECK4-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK4-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP7]], i32 1)
-// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A1]], align 4
-// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK4-NEXT: store i32 [[INC]], i32* [[A1]], align 4
-// CHECK4-NEXT: call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]])
-// CHECK4-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK4-NEXT: ret void
-// CHECK4: worker.exit:
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__4
-// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR0]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
-// CHECK4-NEXT: [[CRITICAL_COUNTER:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK4-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
-// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
-// CHECK4-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_warp_active_thread_mask()
-// CHECK4-NEXT: [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
-// CHECK4-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
-// CHECK4-NEXT: store i32 0, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK4-NEXT: br label [[OMP_CRITICAL_LOOP:%.*]]
-// CHECK4: omp.critical.loop:
-// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK4-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], [[NVPTX_NUM_THREADS]]
-// CHECK4-NEXT: br i1 [[TMP3]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]]
-// CHECK4: omp.critical.test:
-// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK4-NEXT: [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID]], [[TMP4]]
-// CHECK4-NEXT: br i1 [[TMP5]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]]
-// CHECK4: omp.critical.body:
-// CHECK4-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
-// CHECK4-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var")
-// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
-// CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK4-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
-// CHECK4-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var")
-// CHECK4-NEXT: br label [[OMP_CRITICAL_SYNC]]
-// CHECK4: omp.critical.sync:
-// CHECK4-NEXT: call void @__kmpc_syncwarp(i32 [[TMP1]])
-// CHECK4-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP4]], 1
-// CHECK4-NEXT: store i32 [[TMP9]], i32* [[CRITICAL_COUNTER]], align 4
-// CHECK4-NEXT: br label [[OMP_CRITICAL_LOOP]]
-// CHECK4: omp.critical.exit:
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK4-LABEL: define {{[^@]+}}@__omp_outlined__4_wrapper
-// CHECK4-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK4-NEXT: entry:
-// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK4-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK4-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK4-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK4-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK4-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 4
-// CHECK4-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 0
-// CHECK4-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
-// CHECK4-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 4
-// CHECK4-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR2]]
-// CHECK4-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
-// CHECK5-SAME: (i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
-// CHECK5-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 4
-// CHECK5-NEXT: [[CAPTURED_VARS_ADDRS2:%.*]] = alloca [0 x i8*], align 4
-// CHECK5-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
-// CHECK5-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 false, i1 true, i1 true)
-// CHECK5-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
-// CHECK5-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK5: user_code.entry:
-// CHECK5-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK5-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK5-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP2]], i32 0)
-// CHECK5-NEXT: [[TMP3:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
-// CHECK5-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i32 0)
-// CHECK5-NEXT: [[TMP4:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS2]] to i8**
-// CHECK5-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** [[TMP4]], i32 0)
-// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_ADDR]], align 4
-// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
-// CHECK5-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
-// CHECK5-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK5-NEXT: ret void
-// CHECK5: worker.exit:
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__
-// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK5-NEXT: store i32 42, i32* [[A]], align 4
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
-// CHECK5-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK5-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK5-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK5-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK5-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK5-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2:[0-9]+]]
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__1
-// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK5-NEXT: store i32 43, i32* [[A]], align 4
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
-// CHECK5-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK5-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK5-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK5-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK5-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK5-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__2
-// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK5-NEXT: store i32 44, i32* [[A]], align 4
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
-// CHECK5-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK5-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK5-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK5-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK5-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK5-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46
-// CHECK5-SAME: (i32 [[N:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
-// CHECK5-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
-// CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
-// CHECK5-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
-// CHECK5-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
-// CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
-// CHECK5-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
-// CHECK5-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
-// CHECK5-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
-// CHECK5-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
-// CHECK5-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK5: user_code.entry:
-// CHECK5-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
-// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1000
-// CHECK5-NEXT: [[TMP4:%.*]] = zext i1 [[CMP]] to i32
-// CHECK5-NEXT: [[TMP5:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK5-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP4]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** [[TMP5]], i32 0)
-// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ADDR]], align 4
-// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
-// CHECK5-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
-// CHECK5-NEXT: [[TMP7:%.*]] = load i16, i16* [[CONV]], align 4
-// CHECK5-NEXT: [[CONV1:%.*]] = sext i16 [[TMP7]] to i32
-// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
-// CHECK5-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
-// CHECK5-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
-// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
-// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
-// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK5-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
-// CHECK5-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK5-NEXT: ret void
-// CHECK5: worker.exit:
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__3
-// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK5-NEXT: store i32 45, i32* [[A]], align 4
-// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
-// CHECK5-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
-// CHECK5-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK5-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK5-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK5-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK5-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK5-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l58
-// CHECK5-SAME: (i32 [[A:%.*]]) #[[ATTR0]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4
-// CHECK5-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
-// CHECK5-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
-// CHECK5-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
-// CHECK5-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
-// CHECK5: user_code.entry:
-// CHECK5-NEXT: [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i32 4, i16 1)
-// CHECK5-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty*
-// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4
-// CHECK5-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0
-// CHECK5-NEXT: store i32 [[TMP3]], i32* [[A1]], align 4
-// CHECK5-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
-// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
-// CHECK5-NEXT: [[TMP6:%.*]] = bitcast i32* [[A1]] to i8*
-// CHECK5-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4
-// CHECK5-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
-// CHECK5-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP7]], i32 1)
-// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[A1]], align 4
-// CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK5-NEXT: store i32 [[INC]], i32* [[A1]], align 4
-// CHECK5-NEXT: call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]])
-// CHECK5-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
-// CHECK5-NEXT: ret void
-// CHECK5: worker.exit:
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__4
-// CHECK5-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR0]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4
-// CHECK5-NEXT: [[CRITICAL_COUNTER:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
-// CHECK5-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4
-// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4
-// CHECK5-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_warp_active_thread_mask()
-// CHECK5-NEXT: [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
-// CHECK5-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
-// CHECK5-NEXT: store i32 0, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK5-NEXT: br label [[OMP_CRITICAL_LOOP:%.*]]
-// CHECK5: omp.critical.loop:
-// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK5-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], [[NVPTX_NUM_THREADS]]
-// CHECK5-NEXT: br i1 [[TMP3]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]]
-// CHECK5: omp.critical.test:
-// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4
-// CHECK5-NEXT: [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID]], [[TMP4]]
-// CHECK5-NEXT: br i1 [[TMP5]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]]
-// CHECK5: omp.critical.body:
-// CHECK5-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
-// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
-// CHECK5-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var")
-// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4
-// CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
-// CHECK5-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4
-// CHECK5-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var")
-// CHECK5-NEXT: br label [[OMP_CRITICAL_SYNC]]
-// CHECK5: omp.critical.sync:
-// CHECK5-NEXT: call void @__kmpc_syncwarp(i32 [[TMP1]])
-// CHECK5-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP4]], 1
-// CHECK5-NEXT: store i32 [[TMP9]], i32* [[CRITICAL_COUNTER]], align 4
-// CHECK5-NEXT: br label [[OMP_CRITICAL_LOOP]]
-// CHECK5: omp.critical.exit:
-// CHECK5-NEXT: ret void
-//
-//
-// CHECK5-LABEL: define {{[^@]+}}@__omp_outlined__4_wrapper
-// CHECK5-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
-// CHECK5-NEXT: entry:
-// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
-// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
-// CHECK5-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
-// CHECK5-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
-// CHECK5-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
-// CHECK5-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
-// CHECK5-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
-// CHECK5-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 4
-// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 0
-// CHECK5-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
-// CHECK5-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 4
-// CHECK5-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR2]]
-// CHECK5-NEXT: ret void
+// CHECK__64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
+// CHECK__64-SAME: (i64 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK__64-NEXT: entry:
+// CHECK__64-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK__64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
+// CHECK__64-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 8
+// CHECK__64-NEXT: [[CAPTURED_VARS_ADDRS2:%.*]] = alloca [0 x i8*], align 8
+// CHECK__64-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
+// CHECK__64-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
+// CHECK__64-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 false, i1 true, i1 true)
+// CHECK__64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
+// CHECK__64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK__64: user_code.entry:
+// CHECK__64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
+// CHECK__64-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
+// CHECK__64-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP2]], i64 0)
+// CHECK__64-NEXT: [[TMP3:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
+// CHECK__64-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i64 0)
+// CHECK__64-NEXT: [[TMP4:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS2]] to i8**
+// CHECK__64-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** [[TMP4]], i64 0)
+// CHECK__64-NEXT: [[TMP5:%.*]] = load i32, i32* [[CONV]], align 8
+// CHECK__64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
+// CHECK__64-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
+// CHECK__64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
+// CHECK__64-NEXT: ret void
+// CHECK__64: worker.exit:
+// CHECK__64-NEXT: ret void
+//
+// CHECK__32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l29
+// CHECK__32-SAME: (i32 [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK__32-NEXT: entry:
+// CHECK__32-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK__32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
+// CHECK__32-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [0 x i8*], align 4
+// CHECK__32-NEXT: [[CAPTURED_VARS_ADDRS2:%.*]] = alloca [0 x i8*], align 4
+// CHECK__32-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
+// CHECK__32-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i1 false, i1 true, i1 true)
+// CHECK__32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
+// CHECK__32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK__32: user_code.entry:
+// CHECK__32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
+// CHECK__32-NEXT: [[TMP2:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
+// CHECK__32-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP2]], i32 0)
+// CHECK__32-NEXT: [[TMP3:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
+// CHECK__32-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 0, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP3]], i32 0)
+// CHECK__32-NEXT: [[TMP4:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS2]] to i8**
+// CHECK__32-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__2 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__2_wrapper to i8*), i8** [[TMP4]], i32 0)
+// CHECK__32-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// CHECK__32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], 1
+// CHECK__32-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
+// CHECK__32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
+// CHECK__32-NEXT: ret void
+// CHECK__32: worker.exit:
+// CHECK__32-NEXT: ret void
+//
+// CHECK__64-LABEL: define {{[^@]+}}@__omp_outlined__
+// CHECK__64-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK__64-NEXT: entry:
+// CHECK__64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
+// CHECK__64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
+// CHECK__64-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK__64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
+// CHECK__64-NEXT: store i32 42, i32* [[A]], align 4
+// CHECK__64-NEXT: ret void
+//
+// CHECK__32-LABEL: define {{[^@]+}}@__omp_outlined__
+// CHECK__32-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK__32-NEXT: entry:
+// CHECK__32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
+// CHECK__32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
+// CHECK__32-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK__32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK__32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
+// CHECK__32-NEXT: store i32 42, i32* [[A]], align 4
+// CHECK__32-NEXT: ret void
+//
+// CHECK__64-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
+// CHECK__64-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK__64-NEXT: entry:
+// CHECK__64-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK__64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
+// CHECK__64-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK__64-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK__64-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK__64-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK__64-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2:[0-9]+]]
+// CHECK__64-NEXT: ret void
+//
+// CHECK__R3-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
+// CHECK__R3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
+// CHECK__R3-NEXT: entry:
+// CHECK__R3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK__R3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK__R3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK__R3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
+// CHECK__R3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK__R3-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK__R3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK__R3-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK__R3-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR1:[0-9]+]]
+// CHECK__R3-NEXT: ret void
+//
+// CHECK_R45-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
+// CHECK_R45-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
+// CHECK_R45-NEXT: entry:
+// CHECK_R45-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK_R45-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK_R45-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK_R45-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
+// CHECK_R45-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK_R45-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK_R45-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK_R45-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK_R45-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2:[0-9]+]]
+// CHECK_R45-NEXT: ret void
+//
+// CHECK__64-LABEL: define {{[^@]+}}@__omp_outlined__1
+// CHECK__64-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK__64-NEXT: entry:
+// CHECK__64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
+// CHECK__64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
+// CHECK__64-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK__64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
+// CHECK__64-NEXT: store i32 43, i32* [[A]], align 4
+// CHECK__64-NEXT: ret void
+//
+// CHECK__32-LABEL: define {{[^@]+}}@__omp_outlined__1
+// CHECK__32-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK__32-NEXT: entry:
+// CHECK__32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
+// CHECK__32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
+// CHECK__32-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK__32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK__32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
+// CHECK__32-NEXT: store i32 43, i32* [[A]], align 4
+// CHECK__32-NEXT: ret void
+//
+// CHECK__64-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
+// CHECK__64-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
+// CHECK__64-NEXT: entry:
+// CHECK__64-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK__64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
+// CHECK__64-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK__64-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK__64-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK__64-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK__64-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
+// CHECK__64-NEXT: ret void
+//
+// CHECK__R3-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
+// CHECK__R3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
+// CHECK__R3-NEXT: entry:
+// CHECK__R3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK__R3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK__R3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK__R3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
+// CHECK__R3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK__R3-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK__R3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK__R3-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK__R3-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR1]]
+// CHECK__R3-NEXT: ret void
+//
+// CHECK_R45-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
+// CHECK_R45-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
+// CHECK_R45-NEXT: entry:
+// CHECK_R45-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK_R45-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK_R45-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK_R45-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
+// CHECK_R45-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK_R45-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK_R45-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK_R45-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK_R45-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
+// CHECK_R45-NEXT: ret void
+//
+// CHECK__64-LABEL: define {{[^@]+}}@__omp_outlined__2
+// CHECK__64-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK__64-NEXT: entry:
+// CHECK__64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
+// CHECK__64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
+// CHECK__64-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK__64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
+// CHECK__64-NEXT: store i32 44, i32* [[A]], align 4
+// CHECK__64-NEXT: ret void
+//
+// CHECK__32-LABEL: define {{[^@]+}}@__omp_outlined__2
+// CHECK__32-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK__32-NEXT: entry:
+// CHECK__32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
+// CHECK__32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
+// CHECK__32-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK__32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK__32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
+// CHECK__32-NEXT: store i32 44, i32* [[A]], align 4
+// CHECK__32-NEXT: ret void
+//
+// CHECK__64-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
+// CHECK__64-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
+// CHECK__64-NEXT: entry:
+// CHECK__64-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK__64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
+// CHECK__64-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK__64-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK__64-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK__64-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK__64-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
+// CHECK__64-NEXT: ret void
+//
+// CHECK__R3-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
+// CHECK__R3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
+// CHECK__R3-NEXT: entry:
+// CHECK__R3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK__R3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK__R3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK__R3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
+// CHECK__R3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK__R3-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK__R3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK__R3-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK__R3-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR1]]
+// CHECK__R3-NEXT: ret void
+//
+// CHECK_R45-LABEL: define {{[^@]+}}@__omp_outlined__2_wrapper
+// CHECK_R45-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
+// CHECK_R45-NEXT: entry:
+// CHECK_R45-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK_R45-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK_R45-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK_R45-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
+// CHECK_R45-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK_R45-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK_R45-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK_R45-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK_R45-NEXT: call void @__omp_outlined__2(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
+// CHECK_R45-NEXT: ret void
+//
+// CHECK__64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46
+// CHECK__64-SAME: (i64 [[N:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
+// CHECK__64-NEXT: entry:
+// CHECK__64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
+// CHECK__64-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK__64-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
+// CHECK__64-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
+// CHECK__64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 8
+// CHECK__64-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8
+// CHECK__64-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
+// CHECK__64-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
+// CHECK__64-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
+// CHECK__64-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32*
+// CHECK__64-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_ADDR]] to i32*
+// CHECK__64-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
+// CHECK__64-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
+// CHECK__64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
+// CHECK__64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK__64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK__64: user_code.entry:
+// CHECK__64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
+// CHECK__64-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8
+// CHECK__64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1000
+// CHECK__64-NEXT: [[TMP4:%.*]] = zext i1 [[CMP]] to i32
+// CHECK__64-NEXT: [[TMP5:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
+// CHECK__64-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP4]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** [[TMP5]], i64 0)
+// CHECK__64-NEXT: [[TMP6:%.*]] = load i32, i32* [[CONV1]], align 8
+// CHECK__64-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
+// CHECK__64-NEXT: store i32 [[ADD]], i32* [[CONV1]], align 8
+// CHECK__64-NEXT: [[TMP7:%.*]] = load i16, i16* [[CONV2]], align 8
+// CHECK__64-NEXT: [[CONV3:%.*]] = sext i16 [[TMP7]] to i32
+// CHECK__64-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
+// CHECK__64-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
+// CHECK__64-NEXT: store i16 [[CONV5]], i16* [[CONV2]], align 8
+// CHECK__64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
+// CHECK__64-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK__64-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP8]], 1
+// CHECK__64-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
+// CHECK__64-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
+// CHECK__64-NEXT: ret void
+// CHECK__64: worker.exit:
+// CHECK__64-NEXT: ret void
+//
+// CHECK__32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46
+// CHECK__32-SAME: (i32 [[N:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
+// CHECK__32-NEXT: entry:
+// CHECK__32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
+// CHECK__32-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK__32-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
+// CHECK__32-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
+// CHECK__32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x i8*], align 4
+// CHECK__32-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
+// CHECK__32-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
+// CHECK__32-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
+// CHECK__32-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
+// CHECK__32-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
+// CHECK__32-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
+// CHECK__32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
+// CHECK__32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
+// CHECK__32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK__32: user_code.entry:
+// CHECK__32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
+// CHECK__32-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
+// CHECK__32-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1000
+// CHECK__32-NEXT: [[TMP4:%.*]] = zext i1 [[CMP]] to i32
+// CHECK__32-NEXT: [[TMP5:%.*]] = bitcast [0 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
+// CHECK__32-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 [[TMP4]], i32 -1, i32 -1, i8* bitcast (void (i32*, i32*)* @__omp_outlined__3 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__3_wrapper to i8*), i8** [[TMP5]], i32 0)
+// CHECK__32-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// CHECK__32-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], 1
+// CHECK__32-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
+// CHECK__32-NEXT: [[TMP7:%.*]] = load i16, i16* [[CONV]], align 4
+// CHECK__32-NEXT: [[CONV1:%.*]] = sext i16 [[TMP7]] to i32
+// CHECK__32-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV1]], 1
+// CHECK__32-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
+// CHECK__32-NEXT: store i16 [[CONV3]], i16* [[CONV]], align 4
+// CHECK__32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
+// CHECK__32-NEXT: [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK__32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
+// CHECK__32-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4
+// CHECK__32-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true)
+// CHECK__32-NEXT: ret void
+// CHECK__32: worker.exit:
+// CHECK__32-NEXT: ret void
+//
+// CHECK__64-LABEL: define {{[^@]+}}@__omp_outlined__3
+// CHECK__64-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK__64-NEXT: entry:
+// CHECK__64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
+// CHECK__64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
+// CHECK__64-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK__64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
+// CHECK__64-NEXT: store i32 45, i32* [[A]], align 4
+// CHECK__64-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
+// CHECK__64-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
+// CHECK__64-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
+// CHECK__64-NEXT: ret void
+//
+// CHECK__32-LABEL: define {{[^@]+}}@__omp_outlined__3
+// CHECK__32-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
+// CHECK__32-NEXT: entry:
+// CHECK__32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
+// CHECK__32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
+// CHECK__32-NEXT: [[A:%.*]] = alloca i32, align 4
+// CHECK__32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK__32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
+// CHECK__32-NEXT: store i32 45, i32* [[A]], align 4
+// CHECK__32-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
+// CHECK__32-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
+// CHECK__32-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1]])
+// CHECK__32-NEXT: ret void
+//
+// CHECK__64-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
+// CHECK__64-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
+// CHECK__64-NEXT: entry:
+// CHECK__64-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK__64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK__64-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
+// CHECK__64-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK__64-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK__64-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK__64-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK__64-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
+// CHECK__64-NEXT: ret void
+//
+// CHECK__R3-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
+// CHECK__R3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] {
+// CHECK__R3-NEXT: entry:
+// CHECK__R3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK__R3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK__R3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK__R3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
+// CHECK__R3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK__R3-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK__R3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK__R3-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK__R3-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR1]]
+// CHECK__R3-NEXT: ret void
+//
+// CHECK_R45-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
+// CHECK_R45-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] {
+// CHECK_R45-NEXT: entry:
+// CHECK_R45-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
+// CHECK_R45-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
+// CHECK_R45-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
+// CHECK_R45-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4
+// CHECK_R45-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
+// CHECK_R45-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
+// CHECK_R45-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
+// CHECK_R45-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
+// CHECK_R45-NEXT: call void @__omp_outlined__3(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]]) #[[ATTR2]]
+// CHECK_R45-NEXT: ret void
+//
+// CHECK__R1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l58
+// CHECK__R1-SAME: (i64 [[A:%.*]]) #[[ATTR0]] {
+// CHECK__R1-NEXT: entry:
+// CHECK__R1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK__R1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
+// CHECK__R1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
+// CHECK__R1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
+// CHECK__R1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true)
+// CHECK__R1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
+// CHECK__R1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
+// CHECK__R1: user_code.entry:
+// CHECK__R1-NEXT: [[TMP1:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2
+// CHECK__R1-NEXT: [[TMP2:%.*]] = load i64, i64* @"_openmp_static_kernel$size", align 8
+// CHECK__R1-NEXT: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i64 [[TMP2]], i16 [[TMP1]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**))
+// CHECK__R1-NEXT: [[TMP3:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 8
+// CHECK__R1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i64 0
+// CHECK__R1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct._globalized_locals_ty*
+// CHECK__R1-NEXT: [[TMP6:%.*]] = load i32, i32* [[CONV]], align 8
+// CHECK__R1-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP5]], i32 0, i32 0
+// CHECK__R1-NEXT: store i32 [[TMP6]], i32* [[A1]], align 4
+// CHECK__R1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
+// CHECK__R1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
+// CHECK__R1-NEXT: [[TMP9:%.*]] = bitcast i32* [[A1]] to i8*
+// CHECK__R1-NEXT: store i8* [[TMP9]], i8** [[TMP8]], align 8
CHECK__R1-NEXT: [[TMP10:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** +// CHECK__R1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP10]], i64 1) +// CHECK__R1-NEXT: [[TMP11:%.*]] = load i32, i32* [[A1]], align 4 +// CHECK__R1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1 +// CHECK__R1-NEXT: store i32 [[INC]], i32* [[A1]], align 4 +// CHECK__R1-NEXT: [[TMP12:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2 +// CHECK__R1-NEXT: call void @__kmpc_restore_team_static_memory(i16 0, i16 [[TMP12]]) +// CHECK__R1-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true) +// CHECK__R1-NEXT: ret void +// CHECK__R1: worker.exit: +// CHECK__R1-NEXT: ret void +// +// CHECK__R2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l58 +// CHECK__R2-SAME: (i64 [[A:%.*]]) #[[ATTR0]] { +// CHECK__R2-NEXT: entry: +// CHECK__R2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// CHECK__R2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8 +// CHECK__R2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 +// CHECK__R2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* +// CHECK__R2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true) +// CHECK__R2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 +// CHECK__R2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] +// CHECK__R2: user_code.entry: +// CHECK__R2-NEXT: [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i64 4, i16 1) +// CHECK__R2-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty* +// CHECK__R2-NEXT: [[TMP3:%.*]] = load i32, i32* [[CONV]], align 8 +// CHECK__R2-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0 +// CHECK__R2-NEXT: store i32 [[TMP3]], i32* [[A1]], align 4 +// CHECK__R2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) +// CHECK__R2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 +// CHECK__R2-NEXT: [[TMP6:%.*]] = bitcast i32* [[A1]] to i8* +// CHECK__R2-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8 +// CHECK__R2-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** +// CHECK__R2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP7]], i64 1) +// CHECK__R2-NEXT: [[TMP8:%.*]] = load i32, i32* [[A1]], align 4 +// CHECK__R2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK__R2-NEXT: store i32 [[INC]], i32* [[A1]], align 4 +// CHECK__R2-NEXT: call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]]) +// CHECK__R2-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true) +// CHECK__R2-NEXT: ret void +// CHECK__R2: worker.exit: +// CHECK__R2-NEXT: ret void +// +// CHECK__R3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l58 +// CHECK__R3-SAME: (i32 [[A:%.*]]) #[[ATTR0]] { +// CHECK__R3-NEXT: entry: +// CHECK__R3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK__R3-NEXT: 
[[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4 +// CHECK__R3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 +// CHECK__R3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true) +// CHECK__R3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 +// CHECK__R3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] +// CHECK__R3: user_code.entry: +// CHECK__R3-NEXT: [[TMP1:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2 +// CHECK__R3-NEXT: [[TMP2:%.*]] = load i32, i32* @"_openmp_static_kernel$size", align 4 +// CHECK__R3-NEXT: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i32 [[TMP2]], i16 [[TMP1]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**)) +// CHECK__R3-NEXT: [[TMP3:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 4 +// CHECK__R3-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[TMP3]], i32 0 +// CHECK__R3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct._globalized_locals_ty* +// CHECK__R3-NEXT: [[TMP6:%.*]] = load i32, i32* [[A_ADDR]], align 4 +// CHECK__R3-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP5]], i32 0, i32 0 +// CHECK__R3-NEXT: store i32 [[TMP6]], i32* [[A1]], align 4 +// CHECK__R3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) +// CHECK__R3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0 +// CHECK__R3-NEXT: [[TMP9:%.*]] = bitcast i32* [[A1]] to i8* +// CHECK__R3-NEXT: store i8* [[TMP9]], i8** [[TMP8]], align 4 +// CHECK__R3-NEXT: [[TMP10:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** +// CHECK__R3-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP10]], i32 1) +// CHECK__R3-NEXT: [[TMP11:%.*]] = load i32, i32* [[A1]], align 4 +// CHECK__R3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1 +// CHECK__R3-NEXT: store i32 [[INC]], i32* [[A1]], align 4 +// CHECK__R3-NEXT: [[TMP12:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2 +// CHECK__R3-NEXT: call void @__kmpc_restore_team_static_memory(i16 0, i16 [[TMP12]]) +// CHECK__R3-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true) +// CHECK__R3-NEXT: ret void +// CHECK__R3: worker.exit: +// CHECK__R3-NEXT: ret void +// +// CHECK_R45-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l58 +// CHECK_R45-SAME: (i32 [[A:%.*]]) #[[ATTR0]] { +// CHECK_R45-NEXT: entry: +// CHECK_R45-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// CHECK_R45-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 4 +// CHECK_R45-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 +// CHECK_R45-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1]], i1 false, i1 true, i1 true) +// CHECK_R45-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 +// CHECK_R45-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] +// CHECK_R45: 
user_code.entry: +// CHECK_R45-NEXT: [[TMP1:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i32 4, i16 1) +// CHECK_R45-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct._globalized_locals_ty* +// CHECK_R45-NEXT: [[TMP3:%.*]] = load i32, i32* [[A_ADDR]], align 4 +// CHECK_R45-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP2]], i32 0, i32 0 +// CHECK_R45-NEXT: store i32 [[TMP3]], i32* [[A1]], align 4 +// CHECK_R45-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]]) +// CHECK_R45-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i32 0, i32 0 +// CHECK_R45-NEXT: [[TMP6:%.*]] = bitcast i32* [[A1]] to i8* +// CHECK_R45-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4 +// CHECK_R45-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** +// CHECK_R45-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__4 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__4_wrapper to i8*), i8** [[TMP7]], i32 1) +// CHECK_R45-NEXT: [[TMP8:%.*]] = load i32, i32* [[A1]], align 4 +// CHECK_R45-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK_R45-NEXT: store i32 [[INC]], i32* [[A1]], align 4 +// CHECK_R45-NEXT: call void @__kmpc_data_sharing_pop_stack(i8* [[TMP1]]) +// CHECK_R45-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i1 false, i1 true) +// CHECK_R45-NEXT: ret void +// CHECK_R45: worker.exit: +// CHECK_R45-NEXT: ret void +// +// CHECK__64-LABEL: define {{[^@]+}}@__omp_outlined__4 +// CHECK__64-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR0]] { +// CHECK__64-NEXT: entry: +// CHECK__64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 +// CHECK__64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 +// CHECK__64-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 +// CHECK__64-NEXT: [[CRITICAL_COUNTER:%.*]] = alloca i32, align 4 +// CHECK__64-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK__64-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 +// CHECK__64-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 +// CHECK__64-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8 +// CHECK__64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_warp_active_thread_mask() +// CHECK__64-NEXT: [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() +// CHECK__64-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() +// CHECK__64-NEXT: store i32 0, i32* [[CRITICAL_COUNTER]], align 4 +// CHECK__64-NEXT: br label [[OMP_CRITICAL_LOOP:%.*]] +// CHECK__64: omp.critical.loop: +// CHECK__64-NEXT: [[TMP2:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4 +// CHECK__64-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], [[NVPTX_NUM_THREADS]] +// CHECK__64-NEXT: br i1 [[TMP3]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]] +// CHECK__64: omp.critical.test: +// CHECK__64-NEXT: [[TMP4:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4 +// CHECK__64-NEXT: [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID]], [[TMP4]] +// CHECK__64-NEXT: br i1 [[TMP5]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]] +// CHECK__64: omp.critical.body: +// CHECK__64-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK__64-NEXT: [[TMP7:%.*]] = 
load i32, i32* [[TMP6]], align 4 +// CHECK__64-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var") +// CHECK__64-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4 +// CHECK__64-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK__64-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4 +// CHECK__64-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var") +// CHECK__64-NEXT: br label [[OMP_CRITICAL_SYNC]] +// CHECK__64: omp.critical.sync: +// CHECK__64-NEXT: call void @__kmpc_syncwarp(i32 [[TMP1]]) +// CHECK__64-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP4]], 1 +// CHECK__64-NEXT: store i32 [[TMP9]], i32* [[CRITICAL_COUNTER]], align 4 +// CHECK__64-NEXT: br label [[OMP_CRITICAL_LOOP]] +// CHECK__64: omp.critical.exit: +// CHECK__64-NEXT: ret void +// +// CHECK__32-LABEL: define {{[^@]+}}@__omp_outlined__4 +// CHECK__32-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR0]] { +// CHECK__32-NEXT: entry: +// CHECK__32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK__32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK__32-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 +// CHECK__32-NEXT: [[CRITICAL_COUNTER:%.*]] = alloca i32, align 4 +// CHECK__32-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK__32-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 +// CHECK__32-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 +// CHECK__32-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 4 +// CHECK__32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_warp_active_thread_mask() +// CHECK__32-NEXT: [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() +// CHECK__32-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() +// CHECK__32-NEXT: store i32 0, i32* [[CRITICAL_COUNTER]], align 4 +// CHECK__32-NEXT: br label [[OMP_CRITICAL_LOOP:%.*]] +// CHECK__32: omp.critical.loop: +// CHECK__32-NEXT: [[TMP2:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4 +// CHECK__32-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], [[NVPTX_NUM_THREADS]] +// CHECK__32-NEXT: br i1 [[TMP3]], label [[OMP_CRITICAL_TEST:%.*]], label [[OMP_CRITICAL_EXIT:%.*]] +// CHECK__32: omp.critical.test: +// CHECK__32-NEXT: [[TMP4:%.*]] = load i32, i32* [[CRITICAL_COUNTER]], align 4 +// CHECK__32-NEXT: [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID]], [[TMP4]] +// CHECK__32-NEXT: br i1 [[TMP5]], label [[OMP_CRITICAL_BODY:%.*]], label [[OMP_CRITICAL_SYNC:%.*]] +// CHECK__32: omp.critical.body: +// CHECK__32-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK__32-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4 +// CHECK__32-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var") +// CHECK__32-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP0]], align 4 +// CHECK__32-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK__32-NEXT: store i32 [[INC]], i32* [[TMP0]], align 4 +// CHECK__32-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB1]], i32 [[TMP7]], [8 x i32]* @"_gomp_critical_user_$var") +// CHECK__32-NEXT: br label [[OMP_CRITICAL_SYNC]] +// CHECK__32: omp.critical.sync: +// CHECK__32-NEXT: call void @__kmpc_syncwarp(i32 [[TMP1]]) +// CHECK__32-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP4]], 1 +// CHECK__32-NEXT: store i32 [[TMP9]], i32* 
[[CRITICAL_COUNTER]], align 4 +// CHECK__32-NEXT: br label [[OMP_CRITICAL_LOOP]] +// CHECK__32: omp.critical.exit: +// CHECK__32-NEXT: ret void +// +// CHECK__64-LABEL: define {{[^@]+}}@__omp_outlined__4_wrapper +// CHECK__64-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] { +// CHECK__64-NEXT: entry: +// CHECK__64-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2 +// CHECK__64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4 +// CHECK__64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 +// CHECK__64-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8 +// CHECK__64-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4 +// CHECK__64-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2 +// CHECK__64-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4 +// CHECK__64-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]]) +// CHECK__64-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8 +// CHECK__64-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0 +// CHECK__64-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32** +// CHECK__64-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8 +// CHECK__64-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR2]] +// CHECK__64-NEXT: ret void +// +// CHECK__R3-LABEL: define {{[^@]+}}@__omp_outlined__4_wrapper +// CHECK__R3-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] { +// CHECK__R3-NEXT: entry: +// CHECK__R3-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2 +// CHECK__R3-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4 +// CHECK__R3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 +// CHECK__R3-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4 +// CHECK__R3-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4 +// CHECK__R3-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2 +// CHECK__R3-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4 +// CHECK__R3-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]]) +// CHECK__R3-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 4 +// CHECK__R3-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 0 +// CHECK__R3-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32** +// CHECK__R3-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 4 +// CHECK__R3-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR1]] +// CHECK__R3-NEXT: ret void +// +// CHECK_R45-LABEL: define {{[^@]+}}@__omp_outlined__4_wrapper +// CHECK_R45-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR1]] { +// CHECK_R45-NEXT: entry: +// CHECK_R45-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2 +// CHECK_R45-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4 +// CHECK_R45-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 +// CHECK_R45-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 4 +// CHECK_R45-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4 +// CHECK_R45-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2 +// CHECK_R45-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4 +// CHECK_R45-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]]) +// CHECK_R45-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 4 +// CHECK_R45-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i32 0 +// CHECK_R45-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32** +// CHECK_R45-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 4 +// CHECK_R45-NEXT: call void @__omp_outlined__4(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR2]] +// CHECK_R45-NEXT: ret 
void //